Commit 505fd21d6138545aa5e96aa738975e6a9deb98a9
Committed by Herbert Xu
1 parent 811d8f0626
Exists in master and in 39 other branches
crypto: cryptd - Use nivcipher in cryptd_alloc_ablkcipher
Use crypto_alloc_base() instead of crypto_alloc_ablkcipher() to allocate the underlying tfm in cryptd_alloc_ablkcipher(), because crypto_alloc_ablkcipher() prefers the GENIV-encapsulated cipher over the raw one, while cryptd_alloc_ablkcipher() needs the raw one.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
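For orientation before the diff: the change constrains the algorithm lookup by hand instead of relying on crypto_alloc_ablkcipher()'s defaults. The snippet below is only an annotated restatement of the new allocation path from the patch; every identifier comes from the diff itself, and the algorithm name mentioned in a comment is illustrative only. A caller-side usage sketch follows the diff at the end of the page.

	/* Annotated restatement of the new lookup in cryptd_alloc_ablkcipher(). */
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	/* Wrap the caller's algorithm name, e.g. "cbc(aes)" becomes "cryptd(cbc(aes))". */
	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	/* Ask for a blkcipher-family algorithm and strip any GENIV request ... */
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;

	/* ... and make the lookup compare both the type bits and the GENIV bit,
	 * so a GENIV-wrapped instance can never match and only the raw cipher
	 * is returned. */
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);

	/* crypto_alloc_base() honours the type/mask pair as given, whereas
	 * crypto_alloc_ablkcipher() prefers the GENIV-encapsulated cipher. */
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);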
Showing 1 changed file with 9 additions and 5 deletions (inline diff)
crypto/cryptd.c
1 | /* | 1 | /* |
2 | * Software async crypto daemon. | 2 | * Software async crypto daemon. |
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/algapi.h> | 13 | #include <crypto/algapi.h> |
14 | #include <crypto/internal/hash.h> | 14 | #include <crypto/internal/hash.h> |
15 | #include <crypto/cryptd.h> | 15 | #include <crypto/cryptd.h> |
16 | #include <crypto/crypto_wq.h> | 16 | #include <crypto/crypto_wq.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | 25 | ||
26 | #define CRYPTD_MAX_CPU_QLEN 100 | 26 | #define CRYPTD_MAX_CPU_QLEN 100 |
27 | 27 | ||
28 | struct cryptd_cpu_queue { | 28 | struct cryptd_cpu_queue { |
29 | struct crypto_queue queue; | 29 | struct crypto_queue queue; |
30 | struct work_struct work; | 30 | struct work_struct work; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct cryptd_queue { | 33 | struct cryptd_queue { |
34 | struct cryptd_cpu_queue *cpu_queue; | 34 | struct cryptd_cpu_queue *cpu_queue; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct cryptd_instance_ctx { | 37 | struct cryptd_instance_ctx { |
38 | struct crypto_spawn spawn; | 38 | struct crypto_spawn spawn; |
39 | struct cryptd_queue *queue; | 39 | struct cryptd_queue *queue; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct cryptd_blkcipher_ctx { | 42 | struct cryptd_blkcipher_ctx { |
43 | struct crypto_blkcipher *child; | 43 | struct crypto_blkcipher *child; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | struct cryptd_blkcipher_request_ctx { | 46 | struct cryptd_blkcipher_request_ctx { |
47 | crypto_completion_t complete; | 47 | crypto_completion_t complete; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | struct cryptd_hash_ctx { | 50 | struct cryptd_hash_ctx { |
51 | struct crypto_hash *child; | 51 | struct crypto_hash *child; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct cryptd_hash_request_ctx { | 54 | struct cryptd_hash_request_ctx { |
55 | crypto_completion_t complete; | 55 | crypto_completion_t complete; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static void cryptd_queue_worker(struct work_struct *work); | 58 | static void cryptd_queue_worker(struct work_struct *work); |
59 | 59 | ||
60 | static int cryptd_init_queue(struct cryptd_queue *queue, | 60 | static int cryptd_init_queue(struct cryptd_queue *queue, |
61 | unsigned int max_cpu_qlen) | 61 | unsigned int max_cpu_qlen) |
62 | { | 62 | { |
63 | int cpu; | 63 | int cpu; |
64 | struct cryptd_cpu_queue *cpu_queue; | 64 | struct cryptd_cpu_queue *cpu_queue; |
65 | 65 | ||
66 | queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); | 66 | queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); |
67 | if (!queue->cpu_queue) | 67 | if (!queue->cpu_queue) |
68 | return -ENOMEM; | 68 | return -ENOMEM; |
69 | for_each_possible_cpu(cpu) { | 69 | for_each_possible_cpu(cpu) { |
70 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 70 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
71 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | 71 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); |
72 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); | 72 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); |
73 | } | 73 | } |
74 | return 0; | 74 | return 0; |
75 | } | 75 | } |
76 | 76 | ||
77 | static void cryptd_fini_queue(struct cryptd_queue *queue) | 77 | static void cryptd_fini_queue(struct cryptd_queue *queue) |
78 | { | 78 | { |
79 | int cpu; | 79 | int cpu; |
80 | struct cryptd_cpu_queue *cpu_queue; | 80 | struct cryptd_cpu_queue *cpu_queue; |
81 | 81 | ||
82 | for_each_possible_cpu(cpu) { | 82 | for_each_possible_cpu(cpu) { |
83 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 83 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
84 | BUG_ON(cpu_queue->queue.qlen); | 84 | BUG_ON(cpu_queue->queue.qlen); |
85 | } | 85 | } |
86 | free_percpu(queue->cpu_queue); | 86 | free_percpu(queue->cpu_queue); |
87 | } | 87 | } |
88 | 88 | ||
89 | static int cryptd_enqueue_request(struct cryptd_queue *queue, | 89 | static int cryptd_enqueue_request(struct cryptd_queue *queue, |
90 | struct crypto_async_request *request) | 90 | struct crypto_async_request *request) |
91 | { | 91 | { |
92 | int cpu, err; | 92 | int cpu, err; |
93 | struct cryptd_cpu_queue *cpu_queue; | 93 | struct cryptd_cpu_queue *cpu_queue; |
94 | 94 | ||
95 | cpu = get_cpu(); | 95 | cpu = get_cpu(); |
96 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 96 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); |
97 | err = crypto_enqueue_request(&cpu_queue->queue, request); | 97 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
98 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 98 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
99 | put_cpu(); | 99 | put_cpu(); |
100 | 100 | ||
101 | return err; | 101 | return err; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* Called in workqueue context, do one real cryption work (via | 104 | /* Called in workqueue context, do one real cryption work (via |
105 | * req->complete) and reschedule itself if there are more work to | 105 | * req->complete) and reschedule itself if there are more work to |
106 | * do. */ | 106 | * do. */ |
107 | static void cryptd_queue_worker(struct work_struct *work) | 107 | static void cryptd_queue_worker(struct work_struct *work) |
108 | { | 108 | { |
109 | struct cryptd_cpu_queue *cpu_queue; | 109 | struct cryptd_cpu_queue *cpu_queue; |
110 | struct crypto_async_request *req, *backlog; | 110 | struct crypto_async_request *req, *backlog; |
111 | 111 | ||
112 | cpu_queue = container_of(work, struct cryptd_cpu_queue, work); | 112 | cpu_queue = container_of(work, struct cryptd_cpu_queue, work); |
113 | /* Only handle one request at a time to avoid hogging crypto | 113 | /* Only handle one request at a time to avoid hogging crypto |
114 | * workqueue. preempt_disable/enable is used to prevent | 114 | * workqueue. preempt_disable/enable is used to prevent |
115 | * being preempted by cryptd_enqueue_request() */ | 115 | * being preempted by cryptd_enqueue_request() */ |
116 | preempt_disable(); | 116 | preempt_disable(); |
117 | backlog = crypto_get_backlog(&cpu_queue->queue); | 117 | backlog = crypto_get_backlog(&cpu_queue->queue); |
118 | req = crypto_dequeue_request(&cpu_queue->queue); | 118 | req = crypto_dequeue_request(&cpu_queue->queue); |
119 | preempt_enable(); | 119 | preempt_enable(); |
120 | 120 | ||
121 | if (!req) | 121 | if (!req) |
122 | return; | 122 | return; |
123 | 123 | ||
124 | if (backlog) | 124 | if (backlog) |
125 | backlog->complete(backlog, -EINPROGRESS); | 125 | backlog->complete(backlog, -EINPROGRESS); |
126 | req->complete(req, 0); | 126 | req->complete(req, 0); |
127 | 127 | ||
128 | if (cpu_queue->queue.qlen) | 128 | if (cpu_queue->queue.qlen) |
129 | queue_work(kcrypto_wq, &cpu_queue->work); | 129 | queue_work(kcrypto_wq, &cpu_queue->work); |
130 | } | 130 | } |
131 | 131 | ||
132 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) | 132 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) |
133 | { | 133 | { |
134 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 134 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
135 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 135 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
136 | return ictx->queue; | 136 | return ictx->queue; |
137 | } | 137 | } |
138 | 138 | ||
139 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, | 139 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, |
140 | const u8 *key, unsigned int keylen) | 140 | const u8 *key, unsigned int keylen) |
141 | { | 141 | { |
142 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); | 142 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); |
143 | struct crypto_blkcipher *child = ctx->child; | 143 | struct crypto_blkcipher *child = ctx->child; |
144 | int err; | 144 | int err; |
145 | 145 | ||
146 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 146 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
147 | crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & | 147 | crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & |
148 | CRYPTO_TFM_REQ_MASK); | 148 | CRYPTO_TFM_REQ_MASK); |
149 | err = crypto_blkcipher_setkey(child, key, keylen); | 149 | err = crypto_blkcipher_setkey(child, key, keylen); |
150 | crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & | 150 | crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & |
151 | CRYPTO_TFM_RES_MASK); | 151 | CRYPTO_TFM_RES_MASK); |
152 | return err; | 152 | return err; |
153 | } | 153 | } |
154 | 154 | ||
155 | static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, | 155 | static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, |
156 | struct crypto_blkcipher *child, | 156 | struct crypto_blkcipher *child, |
157 | int err, | 157 | int err, |
158 | int (*crypt)(struct blkcipher_desc *desc, | 158 | int (*crypt)(struct blkcipher_desc *desc, |
159 | struct scatterlist *dst, | 159 | struct scatterlist *dst, |
160 | struct scatterlist *src, | 160 | struct scatterlist *src, |
161 | unsigned int len)) | 161 | unsigned int len)) |
162 | { | 162 | { |
163 | struct cryptd_blkcipher_request_ctx *rctx; | 163 | struct cryptd_blkcipher_request_ctx *rctx; |
164 | struct blkcipher_desc desc; | 164 | struct blkcipher_desc desc; |
165 | 165 | ||
166 | rctx = ablkcipher_request_ctx(req); | 166 | rctx = ablkcipher_request_ctx(req); |
167 | 167 | ||
168 | if (unlikely(err == -EINPROGRESS)) | 168 | if (unlikely(err == -EINPROGRESS)) |
169 | goto out; | 169 | goto out; |
170 | 170 | ||
171 | desc.tfm = child; | 171 | desc.tfm = child; |
172 | desc.info = req->info; | 172 | desc.info = req->info; |
173 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 173 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
174 | 174 | ||
175 | err = crypt(&desc, req->dst, req->src, req->nbytes); | 175 | err = crypt(&desc, req->dst, req->src, req->nbytes); |
176 | 176 | ||
177 | req->base.complete = rctx->complete; | 177 | req->base.complete = rctx->complete; |
178 | 178 | ||
179 | out: | 179 | out: |
180 | local_bh_disable(); | 180 | local_bh_disable(); |
181 | rctx->complete(&req->base, err); | 181 | rctx->complete(&req->base, err); |
182 | local_bh_enable(); | 182 | local_bh_enable(); |
183 | } | 183 | } |
184 | 184 | ||
185 | static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) | 185 | static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) |
186 | { | 186 | { |
187 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | 187 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
188 | struct crypto_blkcipher *child = ctx->child; | 188 | struct crypto_blkcipher *child = ctx->child; |
189 | 189 | ||
190 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | 190 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
191 | crypto_blkcipher_crt(child)->encrypt); | 191 | crypto_blkcipher_crt(child)->encrypt); |
192 | } | 192 | } |
193 | 193 | ||
194 | static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) | 194 | static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) |
195 | { | 195 | { |
196 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | 196 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
197 | struct crypto_blkcipher *child = ctx->child; | 197 | struct crypto_blkcipher *child = ctx->child; |
198 | 198 | ||
199 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | 199 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
200 | crypto_blkcipher_crt(child)->decrypt); | 200 | crypto_blkcipher_crt(child)->decrypt); |
201 | } | 201 | } |
202 | 202 | ||
203 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, | 203 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, |
204 | crypto_completion_t complete) | 204 | crypto_completion_t complete) |
205 | { | 205 | { |
206 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); | 206 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); |
207 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 207 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
208 | struct cryptd_queue *queue; | 208 | struct cryptd_queue *queue; |
209 | 209 | ||
210 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); | 210 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); |
211 | rctx->complete = req->base.complete; | 211 | rctx->complete = req->base.complete; |
212 | req->base.complete = complete; | 212 | req->base.complete = complete; |
213 | 213 | ||
214 | return cryptd_enqueue_request(queue, &req->base); | 214 | return cryptd_enqueue_request(queue, &req->base); |
215 | } | 215 | } |
216 | 216 | ||
217 | static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) | 217 | static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) |
218 | { | 218 | { |
219 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); | 219 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); |
220 | } | 220 | } |
221 | 221 | ||
222 | static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) | 222 | static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) |
223 | { | 223 | { |
224 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); | 224 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); |
225 | } | 225 | } |
226 | 226 | ||
227 | static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) | 227 | static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) |
228 | { | 228 | { |
229 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 229 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
230 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 230 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
231 | struct crypto_spawn *spawn = &ictx->spawn; | 231 | struct crypto_spawn *spawn = &ictx->spawn; |
232 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | 232 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
233 | struct crypto_blkcipher *cipher; | 233 | struct crypto_blkcipher *cipher; |
234 | 234 | ||
235 | cipher = crypto_spawn_blkcipher(spawn); | 235 | cipher = crypto_spawn_blkcipher(spawn); |
236 | if (IS_ERR(cipher)) | 236 | if (IS_ERR(cipher)) |
237 | return PTR_ERR(cipher); | 237 | return PTR_ERR(cipher); |
238 | 238 | ||
239 | ctx->child = cipher; | 239 | ctx->child = cipher; |
240 | tfm->crt_ablkcipher.reqsize = | 240 | tfm->crt_ablkcipher.reqsize = |
241 | sizeof(struct cryptd_blkcipher_request_ctx); | 241 | sizeof(struct cryptd_blkcipher_request_ctx); |
242 | return 0; | 242 | return 0; |
243 | } | 243 | } |
244 | 244 | ||
245 | static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | 245 | static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) |
246 | { | 246 | { |
247 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | 247 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
248 | 248 | ||
249 | crypto_free_blkcipher(ctx->child); | 249 | crypto_free_blkcipher(ctx->child); |
250 | } | 250 | } |
251 | 251 | ||
252 | static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | 252 | static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, |
253 | struct cryptd_queue *queue) | 253 | struct cryptd_queue *queue) |
254 | { | 254 | { |
255 | struct crypto_instance *inst; | 255 | struct crypto_instance *inst; |
256 | struct cryptd_instance_ctx *ctx; | 256 | struct cryptd_instance_ctx *ctx; |
257 | int err; | 257 | int err; |
258 | 258 | ||
259 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 259 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
260 | if (!inst) { | 260 | if (!inst) { |
261 | inst = ERR_PTR(-ENOMEM); | 261 | inst = ERR_PTR(-ENOMEM); |
262 | goto out; | 262 | goto out; |
263 | } | 263 | } |
264 | 264 | ||
265 | err = -ENAMETOOLONG; | 265 | err = -ENAMETOOLONG; |
266 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 266 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
267 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 267 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
268 | goto out_free_inst; | 268 | goto out_free_inst; |
269 | 269 | ||
270 | ctx = crypto_instance_ctx(inst); | 270 | ctx = crypto_instance_ctx(inst); |
271 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | 271 | err = crypto_init_spawn(&ctx->spawn, alg, inst, |
272 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | 272 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); |
273 | if (err) | 273 | if (err) |
274 | goto out_free_inst; | 274 | goto out_free_inst; |
275 | 275 | ||
276 | ctx->queue = queue; | 276 | ctx->queue = queue; |
277 | 277 | ||
278 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 278 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
279 | 279 | ||
280 | inst->alg.cra_priority = alg->cra_priority + 50; | 280 | inst->alg.cra_priority = alg->cra_priority + 50; |
281 | inst->alg.cra_blocksize = alg->cra_blocksize; | 281 | inst->alg.cra_blocksize = alg->cra_blocksize; |
282 | inst->alg.cra_alignmask = alg->cra_alignmask; | 282 | inst->alg.cra_alignmask = alg->cra_alignmask; |
283 | 283 | ||
284 | out: | 284 | out: |
285 | return inst; | 285 | return inst; |
286 | 286 | ||
287 | out_free_inst: | 287 | out_free_inst: |
288 | kfree(inst); | 288 | kfree(inst); |
289 | inst = ERR_PTR(err); | 289 | inst = ERR_PTR(err); |
290 | goto out; | 290 | goto out; |
291 | } | 291 | } |
292 | 292 | ||
293 | static struct crypto_instance *cryptd_alloc_blkcipher( | 293 | static struct crypto_instance *cryptd_alloc_blkcipher( |
294 | struct rtattr **tb, struct cryptd_queue *queue) | 294 | struct rtattr **tb, struct cryptd_queue *queue) |
295 | { | 295 | { |
296 | struct crypto_instance *inst; | 296 | struct crypto_instance *inst; |
297 | struct crypto_alg *alg; | 297 | struct crypto_alg *alg; |
298 | 298 | ||
299 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | 299 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, |
300 | CRYPTO_ALG_TYPE_MASK); | 300 | CRYPTO_ALG_TYPE_MASK); |
301 | if (IS_ERR(alg)) | 301 | if (IS_ERR(alg)) |
302 | return ERR_CAST(alg); | 302 | return ERR_CAST(alg); |
303 | 303 | ||
304 | inst = cryptd_alloc_instance(alg, queue); | 304 | inst = cryptd_alloc_instance(alg, queue); |
305 | if (IS_ERR(inst)) | 305 | if (IS_ERR(inst)) |
306 | goto out_put_alg; | 306 | goto out_put_alg; |
307 | 307 | ||
308 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | 308 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
309 | inst->alg.cra_type = &crypto_ablkcipher_type; | 309 | inst->alg.cra_type = &crypto_ablkcipher_type; |
310 | 310 | ||
311 | inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; | 311 | inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; |
312 | inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | 312 | inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; |
313 | inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | 313 | inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; |
314 | 314 | ||
315 | inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; | 315 | inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; |
316 | 316 | ||
317 | inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); | 317 | inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); |
318 | 318 | ||
319 | inst->alg.cra_init = cryptd_blkcipher_init_tfm; | 319 | inst->alg.cra_init = cryptd_blkcipher_init_tfm; |
320 | inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; | 320 | inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; |
321 | 321 | ||
322 | inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; | 322 | inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; |
323 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; | 323 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; |
324 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; | 324 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; |
325 | 325 | ||
326 | out_put_alg: | 326 | out_put_alg: |
327 | crypto_mod_put(alg); | 327 | crypto_mod_put(alg); |
328 | return inst; | 328 | return inst; |
329 | } | 329 | } |
330 | 330 | ||
331 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 331 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
332 | { | 332 | { |
333 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 333 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
334 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 334 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
335 | struct crypto_spawn *spawn = &ictx->spawn; | 335 | struct crypto_spawn *spawn = &ictx->spawn; |
336 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 336 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
337 | struct crypto_hash *cipher; | 337 | struct crypto_hash *cipher; |
338 | 338 | ||
339 | cipher = crypto_spawn_hash(spawn); | 339 | cipher = crypto_spawn_hash(spawn); |
340 | if (IS_ERR(cipher)) | 340 | if (IS_ERR(cipher)) |
341 | return PTR_ERR(cipher); | 341 | return PTR_ERR(cipher); |
342 | 342 | ||
343 | ctx->child = cipher; | 343 | ctx->child = cipher; |
344 | tfm->crt_ahash.reqsize = | 344 | tfm->crt_ahash.reqsize = |
345 | sizeof(struct cryptd_hash_request_ctx); | 345 | sizeof(struct cryptd_hash_request_ctx); |
346 | return 0; | 346 | return 0; |
347 | } | 347 | } |
348 | 348 | ||
349 | static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) | 349 | static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) |
350 | { | 350 | { |
351 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 351 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
352 | 352 | ||
353 | crypto_free_hash(ctx->child); | 353 | crypto_free_hash(ctx->child); |
354 | } | 354 | } |
355 | 355 | ||
356 | static int cryptd_hash_setkey(struct crypto_ahash *parent, | 356 | static int cryptd_hash_setkey(struct crypto_ahash *parent, |
357 | const u8 *key, unsigned int keylen) | 357 | const u8 *key, unsigned int keylen) |
358 | { | 358 | { |
359 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | 359 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
360 | struct crypto_hash *child = ctx->child; | 360 | struct crypto_hash *child = ctx->child; |
361 | int err; | 361 | int err; |
362 | 362 | ||
363 | crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 363 | crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
364 | crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & | 364 | crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & |
365 | CRYPTO_TFM_REQ_MASK); | 365 | CRYPTO_TFM_REQ_MASK); |
366 | err = crypto_hash_setkey(child, key, keylen); | 366 | err = crypto_hash_setkey(child, key, keylen); |
367 | crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & | 367 | crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & |
368 | CRYPTO_TFM_RES_MASK); | 368 | CRYPTO_TFM_RES_MASK); |
369 | return err; | 369 | return err; |
370 | } | 370 | } |
371 | 371 | ||
372 | static int cryptd_hash_enqueue(struct ahash_request *req, | 372 | static int cryptd_hash_enqueue(struct ahash_request *req, |
373 | crypto_completion_t complete) | 373 | crypto_completion_t complete) |
374 | { | 374 | { |
375 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 375 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
376 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 376 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
377 | struct cryptd_queue *queue = | 377 | struct cryptd_queue *queue = |
378 | cryptd_get_queue(crypto_ahash_tfm(tfm)); | 378 | cryptd_get_queue(crypto_ahash_tfm(tfm)); |
379 | 379 | ||
380 | rctx->complete = req->base.complete; | 380 | rctx->complete = req->base.complete; |
381 | req->base.complete = complete; | 381 | req->base.complete = complete; |
382 | 382 | ||
383 | return cryptd_enqueue_request(queue, &req->base); | 383 | return cryptd_enqueue_request(queue, &req->base); |
384 | } | 384 | } |
385 | 385 | ||
386 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) | 386 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) |
387 | { | 387 | { |
388 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 388 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
389 | struct crypto_hash *child = ctx->child; | 389 | struct crypto_hash *child = ctx->child; |
390 | struct ahash_request *req = ahash_request_cast(req_async); | 390 | struct ahash_request *req = ahash_request_cast(req_async); |
391 | struct cryptd_hash_request_ctx *rctx; | 391 | struct cryptd_hash_request_ctx *rctx; |
392 | struct hash_desc desc; | 392 | struct hash_desc desc; |
393 | 393 | ||
394 | rctx = ahash_request_ctx(req); | 394 | rctx = ahash_request_ctx(req); |
395 | 395 | ||
396 | if (unlikely(err == -EINPROGRESS)) | 396 | if (unlikely(err == -EINPROGRESS)) |
397 | goto out; | 397 | goto out; |
398 | 398 | ||
399 | desc.tfm = child; | 399 | desc.tfm = child; |
400 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 400 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
401 | 401 | ||
402 | err = crypto_hash_crt(child)->init(&desc); | 402 | err = crypto_hash_crt(child)->init(&desc); |
403 | 403 | ||
404 | req->base.complete = rctx->complete; | 404 | req->base.complete = rctx->complete; |
405 | 405 | ||
406 | out: | 406 | out: |
407 | local_bh_disable(); | 407 | local_bh_disable(); |
408 | rctx->complete(&req->base, err); | 408 | rctx->complete(&req->base, err); |
409 | local_bh_enable(); | 409 | local_bh_enable(); |
410 | } | 410 | } |
411 | 411 | ||
412 | static int cryptd_hash_init_enqueue(struct ahash_request *req) | 412 | static int cryptd_hash_init_enqueue(struct ahash_request *req) |
413 | { | 413 | { |
414 | return cryptd_hash_enqueue(req, cryptd_hash_init); | 414 | return cryptd_hash_enqueue(req, cryptd_hash_init); |
415 | } | 415 | } |
416 | 416 | ||
417 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) | 417 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) |
418 | { | 418 | { |
419 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 419 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
420 | struct crypto_hash *child = ctx->child; | 420 | struct crypto_hash *child = ctx->child; |
421 | struct ahash_request *req = ahash_request_cast(req_async); | 421 | struct ahash_request *req = ahash_request_cast(req_async); |
422 | struct cryptd_hash_request_ctx *rctx; | 422 | struct cryptd_hash_request_ctx *rctx; |
423 | struct hash_desc desc; | 423 | struct hash_desc desc; |
424 | 424 | ||
425 | rctx = ahash_request_ctx(req); | 425 | rctx = ahash_request_ctx(req); |
426 | 426 | ||
427 | if (unlikely(err == -EINPROGRESS)) | 427 | if (unlikely(err == -EINPROGRESS)) |
428 | goto out; | 428 | goto out; |
429 | 429 | ||
430 | desc.tfm = child; | 430 | desc.tfm = child; |
431 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 431 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
432 | 432 | ||
433 | err = crypto_hash_crt(child)->update(&desc, | 433 | err = crypto_hash_crt(child)->update(&desc, |
434 | req->src, | 434 | req->src, |
435 | req->nbytes); | 435 | req->nbytes); |
436 | 436 | ||
437 | req->base.complete = rctx->complete; | 437 | req->base.complete = rctx->complete; |
438 | 438 | ||
439 | out: | 439 | out: |
440 | local_bh_disable(); | 440 | local_bh_disable(); |
441 | rctx->complete(&req->base, err); | 441 | rctx->complete(&req->base, err); |
442 | local_bh_enable(); | 442 | local_bh_enable(); |
443 | } | 443 | } |
444 | 444 | ||
445 | static int cryptd_hash_update_enqueue(struct ahash_request *req) | 445 | static int cryptd_hash_update_enqueue(struct ahash_request *req) |
446 | { | 446 | { |
447 | return cryptd_hash_enqueue(req, cryptd_hash_update); | 447 | return cryptd_hash_enqueue(req, cryptd_hash_update); |
448 | } | 448 | } |
449 | 449 | ||
450 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) | 450 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) |
451 | { | 451 | { |
452 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 452 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
453 | struct crypto_hash *child = ctx->child; | 453 | struct crypto_hash *child = ctx->child; |
454 | struct ahash_request *req = ahash_request_cast(req_async); | 454 | struct ahash_request *req = ahash_request_cast(req_async); |
455 | struct cryptd_hash_request_ctx *rctx; | 455 | struct cryptd_hash_request_ctx *rctx; |
456 | struct hash_desc desc; | 456 | struct hash_desc desc; |
457 | 457 | ||
458 | rctx = ahash_request_ctx(req); | 458 | rctx = ahash_request_ctx(req); |
459 | 459 | ||
460 | if (unlikely(err == -EINPROGRESS)) | 460 | if (unlikely(err == -EINPROGRESS)) |
461 | goto out; | 461 | goto out; |
462 | 462 | ||
463 | desc.tfm = child; | 463 | desc.tfm = child; |
464 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 464 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
465 | 465 | ||
466 | err = crypto_hash_crt(child)->final(&desc, req->result); | 466 | err = crypto_hash_crt(child)->final(&desc, req->result); |
467 | 467 | ||
468 | req->base.complete = rctx->complete; | 468 | req->base.complete = rctx->complete; |
469 | 469 | ||
470 | out: | 470 | out: |
471 | local_bh_disable(); | 471 | local_bh_disable(); |
472 | rctx->complete(&req->base, err); | 472 | rctx->complete(&req->base, err); |
473 | local_bh_enable(); | 473 | local_bh_enable(); |
474 | } | 474 | } |
475 | 475 | ||
476 | static int cryptd_hash_final_enqueue(struct ahash_request *req) | 476 | static int cryptd_hash_final_enqueue(struct ahash_request *req) |
477 | { | 477 | { |
478 | return cryptd_hash_enqueue(req, cryptd_hash_final); | 478 | return cryptd_hash_enqueue(req, cryptd_hash_final); |
479 | } | 479 | } |
480 | 480 | ||
481 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | 481 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) |
482 | { | 482 | { |
483 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 483 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
484 | struct crypto_hash *child = ctx->child; | 484 | struct crypto_hash *child = ctx->child; |
485 | struct ahash_request *req = ahash_request_cast(req_async); | 485 | struct ahash_request *req = ahash_request_cast(req_async); |
486 | struct cryptd_hash_request_ctx *rctx; | 486 | struct cryptd_hash_request_ctx *rctx; |
487 | struct hash_desc desc; | 487 | struct hash_desc desc; |
488 | 488 | ||
489 | rctx = ahash_request_ctx(req); | 489 | rctx = ahash_request_ctx(req); |
490 | 490 | ||
491 | if (unlikely(err == -EINPROGRESS)) | 491 | if (unlikely(err == -EINPROGRESS)) |
492 | goto out; | 492 | goto out; |
493 | 493 | ||
494 | desc.tfm = child; | 494 | desc.tfm = child; |
495 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 495 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
496 | 496 | ||
497 | err = crypto_hash_crt(child)->digest(&desc, | 497 | err = crypto_hash_crt(child)->digest(&desc, |
498 | req->src, | 498 | req->src, |
499 | req->nbytes, | 499 | req->nbytes, |
500 | req->result); | 500 | req->result); |
501 | 501 | ||
502 | req->base.complete = rctx->complete; | 502 | req->base.complete = rctx->complete; |
503 | 503 | ||
504 | out: | 504 | out: |
505 | local_bh_disable(); | 505 | local_bh_disable(); |
506 | rctx->complete(&req->base, err); | 506 | rctx->complete(&req->base, err); |
507 | local_bh_enable(); | 507 | local_bh_enable(); |
508 | } | 508 | } |
509 | 509 | ||
510 | static int cryptd_hash_digest_enqueue(struct ahash_request *req) | 510 | static int cryptd_hash_digest_enqueue(struct ahash_request *req) |
511 | { | 511 | { |
512 | return cryptd_hash_enqueue(req, cryptd_hash_digest); | 512 | return cryptd_hash_enqueue(req, cryptd_hash_digest); |
513 | } | 513 | } |
514 | 514 | ||
515 | static struct crypto_instance *cryptd_alloc_hash( | 515 | static struct crypto_instance *cryptd_alloc_hash( |
516 | struct rtattr **tb, struct cryptd_queue *queue) | 516 | struct rtattr **tb, struct cryptd_queue *queue) |
517 | { | 517 | { |
518 | struct crypto_instance *inst; | 518 | struct crypto_instance *inst; |
519 | struct crypto_alg *alg; | 519 | struct crypto_alg *alg; |
520 | 520 | ||
521 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, | 521 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, |
522 | CRYPTO_ALG_TYPE_HASH_MASK); | 522 | CRYPTO_ALG_TYPE_HASH_MASK); |
523 | if (IS_ERR(alg)) | 523 | if (IS_ERR(alg)) |
524 | return ERR_PTR(PTR_ERR(alg)); | 524 | return ERR_PTR(PTR_ERR(alg)); |
525 | 525 | ||
526 | inst = cryptd_alloc_instance(alg, queue); | 526 | inst = cryptd_alloc_instance(alg, queue); |
527 | if (IS_ERR(inst)) | 527 | if (IS_ERR(inst)) |
528 | goto out_put_alg; | 528 | goto out_put_alg; |
529 | 529 | ||
530 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; | 530 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; |
531 | inst->alg.cra_type = &crypto_ahash_type; | 531 | inst->alg.cra_type = &crypto_ahash_type; |
532 | 532 | ||
533 | inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; | 533 | inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; |
534 | inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); | 534 | inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); |
535 | 535 | ||
536 | inst->alg.cra_init = cryptd_hash_init_tfm; | 536 | inst->alg.cra_init = cryptd_hash_init_tfm; |
537 | inst->alg.cra_exit = cryptd_hash_exit_tfm; | 537 | inst->alg.cra_exit = cryptd_hash_exit_tfm; |
538 | 538 | ||
539 | inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; | 539 | inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; |
540 | inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; | 540 | inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; |
541 | inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; | 541 | inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; |
542 | inst->alg.cra_ahash.setkey = cryptd_hash_setkey; | 542 | inst->alg.cra_ahash.setkey = cryptd_hash_setkey; |
543 | inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; | 543 | inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; |
544 | 544 | ||
545 | out_put_alg: | 545 | out_put_alg: |
546 | crypto_mod_put(alg); | 546 | crypto_mod_put(alg); |
547 | return inst; | 547 | return inst; |
548 | } | 548 | } |
549 | 549 | ||
550 | static struct cryptd_queue queue; | 550 | static struct cryptd_queue queue; |
551 | 551 | ||
552 | static struct crypto_instance *cryptd_alloc(struct rtattr **tb) | 552 | static struct crypto_instance *cryptd_alloc(struct rtattr **tb) |
553 | { | 553 | { |
554 | struct crypto_attr_type *algt; | 554 | struct crypto_attr_type *algt; |
555 | 555 | ||
556 | algt = crypto_get_attr_type(tb); | 556 | algt = crypto_get_attr_type(tb); |
557 | if (IS_ERR(algt)) | 557 | if (IS_ERR(algt)) |
558 | return ERR_CAST(algt); | 558 | return ERR_CAST(algt); |
559 | 559 | ||
560 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 560 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
561 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 561 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
562 | return cryptd_alloc_blkcipher(tb, &queue); | 562 | return cryptd_alloc_blkcipher(tb, &queue); |
563 | case CRYPTO_ALG_TYPE_DIGEST: | 563 | case CRYPTO_ALG_TYPE_DIGEST: |
564 | return cryptd_alloc_hash(tb, &queue); | 564 | return cryptd_alloc_hash(tb, &queue); |
565 | } | 565 | } |
566 | 566 | ||
567 | return ERR_PTR(-EINVAL); | 567 | return ERR_PTR(-EINVAL); |
568 | } | 568 | } |
569 | 569 | ||
570 | static void cryptd_free(struct crypto_instance *inst) | 570 | static void cryptd_free(struct crypto_instance *inst) |
571 | { | 571 | { |
572 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | 572 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
573 | 573 | ||
574 | crypto_drop_spawn(&ctx->spawn); | 574 | crypto_drop_spawn(&ctx->spawn); |
575 | kfree(inst); | 575 | kfree(inst); |
576 | } | 576 | } |
577 | 577 | ||
578 | static struct crypto_template cryptd_tmpl = { | 578 | static struct crypto_template cryptd_tmpl = { |
579 | .name = "cryptd", | 579 | .name = "cryptd", |
580 | .alloc = cryptd_alloc, | 580 | .alloc = cryptd_alloc, |
581 | .free = cryptd_free, | 581 | .free = cryptd_free, |
582 | .module = THIS_MODULE, | 582 | .module = THIS_MODULE, |
583 | }; | 583 | }; |
584 | 584 | ||
585 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, | 585 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, |
586 | u32 type, u32 mask) | 586 | u32 type, u32 mask) |
587 | { | 587 | { |
588 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | 588 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; |
589 | struct crypto_ablkcipher *tfm; | 589 | struct crypto_tfm *tfm; |
590 | 590 | ||
591 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | 591 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, |
592 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | 592 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) |
593 | return ERR_PTR(-EINVAL); | 593 | return ERR_PTR(-EINVAL); |
594 | tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask); | 594 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); |
| | 595 | type |= CRYPTO_ALG_TYPE_BLKCIPHER; |
| | 596 | mask &= ~CRYPTO_ALG_TYPE_MASK; |
| | 597 | mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); |
| | 598 | tfm = crypto_alloc_base(cryptd_alg_name, type, mask); |
595 | if (IS_ERR(tfm)) | 599 | if (IS_ERR(tfm)) |
596 | return ERR_CAST(tfm); | 600 | return ERR_CAST(tfm); |
597 | if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) { | 601 | if (tfm->__crt_alg->cra_module != THIS_MODULE) { |
598 | crypto_free_ablkcipher(tfm); | 602 | crypto_free_tfm(tfm); |
599 | return ERR_PTR(-EINVAL); | 603 | return ERR_PTR(-EINVAL); |
600 | } | 604 | } |
601 | 605 | ||
602 | return __cryptd_ablkcipher_cast(tfm); | 606 | return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); |
603 | } | 607 | } |
604 | EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); | 608 | EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); |
605 | 609 | ||
606 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) | 610 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) |
607 | { | 611 | { |
608 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); | 612 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); |
609 | return ctx->child; | 613 | return ctx->child; |
610 | } | 614 | } |
611 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); | 615 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); |
612 | 616 | ||
613 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) | 617 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) |
614 | { | 618 | { |
615 | crypto_free_ablkcipher(&tfm->base); | 619 | crypto_free_ablkcipher(&tfm->base); |
616 | } | 620 | } |
617 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | 621 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
618 | 622 | ||
619 | static int __init cryptd_init(void) | 623 | static int __init cryptd_init(void) |
620 | { | 624 | { |
621 | int err; | 625 | int err; |
622 | 626 | ||
623 | err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); | 627 | err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); |
624 | if (err) | 628 | if (err) |
625 | return err; | 629 | return err; |
626 | 630 | ||
627 | err = crypto_register_template(&cryptd_tmpl); | 631 | err = crypto_register_template(&cryptd_tmpl); |
628 | if (err) | 632 | if (err) |
629 | cryptd_fini_queue(&queue); | 633 | cryptd_fini_queue(&queue); |
630 | 634 | ||
631 | return err; | 635 | return err; |
632 | } | 636 | } |
633 | 637 | ||
634 | static void __exit cryptd_exit(void) | 638 | static void __exit cryptd_exit(void) |
635 | { | 639 | { |
636 | cryptd_fini_queue(&queue); | 640 | cryptd_fini_queue(&queue); |
637 | crypto_unregister_template(&cryptd_tmpl); | 641 | crypto_unregister_template(&cryptd_tmpl); |
638 | } | 642 | } |
639 | 643 | ||
640 | module_init(cryptd_init); | 644 | module_init(cryptd_init); |
641 | module_exit(cryptd_exit); | 645 | module_exit(cryptd_exit); |
642 | 646 | ||
643 | MODULE_LICENSE("GPL"); | 647 | MODULE_LICENSE("GPL"); |
644 | MODULE_DESCRIPTION("Software async crypto daemon"); | 648 | MODULE_DESCRIPTION("Software async crypto daemon"); |
645 | 649 |
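For readers unfamiliar with the exported helpers touched above, the following is a minimal usage sketch from a hypothetical in-kernel caller; the function name, key buffer, and the algorithm name "cbc(aes)" are assumptions for illustration and do not come from this commit. The point of the change is that cryptd_ablkcipher_child() now reliably returns the raw (non-GENIV) synchronous blkcipher, which the caller can drive directly, for example from its own setkey path.

	/* Minimal usage sketch (hypothetical caller, not part of this commit). */
	#include <crypto/cryptd.h>
	#include <linux/crypto.h>
	#include <linux/err.h>

	static int example_init(void)
	{
		struct cryptd_ablkcipher *ctfm;
		struct crypto_blkcipher *child;
		static const u8 key[16] = { 0 };	/* illustrative key */
		int err;

		/* cryptd wraps the name internally as "cryptd(cbc(aes))". */
		ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(ctfm))
			return PTR_ERR(ctfm);

		/* The child is the raw synchronous blkcipher, not a GENIV wrapper. */
		child = cryptd_ablkcipher_child(ctfm);
		err = crypto_blkcipher_setkey(child, key, sizeof(key));

		cryptd_free_ablkcipher(ctfm);
		return err;
	}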