Commit cc74f4bc111e9554bcd6445ad0fe1d90e5d2eb34

Authored by Steffen Klassert
Committed by Herbert Xu
1 parent 7424713b83

crypto: pcrypt - Don't calculate a callback cpu on empty callback cpumask

If the callback cpumask is empty, we crash with a division by zero
when we try to calculate a callback cpu. So don't update the callback
cpu in pcrypt_do_parallel() if the callback cpumask is empty.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
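
The failure mode is easy to model outside the kernel. Below is a minimal userspace C sketch (hypothetical, not part of the patch) showing why a modulo by the weight of an empty mask faults, and what the added guard avoids:

#include <stdio.h>

/* Stand-in for cpumask_weight(): number of set bits in a toy mask. */
static unsigned int weight(unsigned long mask)
{
	return (unsigned int)__builtin_popcountl(mask);
}

int main(void)
{
	unsigned long cb_mask = 0;	/* empty callback cpumask */
	unsigned int cpu = 3;		/* previous callback cpu */

	/* The guard this commit adds: bail out before the modulo. */
	if (!weight(cb_mask)) {
		printf("mask empty, keeping cpu %u\n", cpu);
		return 0;
	}

	/* Without the guard this is cpu % 0: an integer division fault
	 * (SIGFPE in userspace, a kernel crash in pcrypt's case). */
	printf("cpu_index = %u\n", cpu % weight(cb_mask));
	return 0;
}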

Showing 1 changed file with 3 additions and 0 deletions (added lines are marked with a leading +).

/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <crypto/pcrypt.h>

struct pcrypt_instance {
	const char *name;
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be
	 * equal to the serial cpumask of the corresponding padata instance,
	 * so it is updated when padata notifies us about a serial
	 * cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on the kernel configuration (particularly on
	 * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
	 * cpumask_var_t may be either a pointer to the struct cpumask
	 * or a variable allocated on the stack. Thus we can not safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask, which makes it possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};
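
To make the comment above concrete, here is an illustrative sketch (hypothetical names, not in the file) of why a bare cpumask_var_t cannot be published with RCU when CONFIG_CPUMASK_OFFSTACK is disabled:

/* Illustration only. With CONFIG_CPUMASK_OFFSTACK=n the kernel defines
 * roughly:
 *
 *	typedef struct cpumask cpumask_var_t[1];
 *
 * i.e. an array, not a pointer, so rcu_assign_pointer() on a bare
 * cpumask_var_t field would not build. The wrapper restores a real
 * pointer that RCU can swap atomically:
 */
struct pcrypt_cpumask_example {			/* hypothetical name */
	cpumask_var_t mask;
};

static struct pcrypt_cpumask_example *example_cb_cpumask;	/* a pointer: RCU-safe */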

static struct pcrypt_instance pencrypt;
static struct pcrypt_instance pdecrypt;
static struct kset *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_spawn spawn;
	unsigned int tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};

static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct pcrypt_instance *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

+	if (!cpumask_weight(cpumask->mask))
+		goto out;
+
	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}
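
Read on its own, the callback-CPU selection above, including the new guard, amounts to the following standalone helper (a sketch using the same cpumask API; the function name is hypothetical):

/* Sketch only: pick the callback CPU, keeping "seed" (the previous
 * callback CPU) when it is still valid or when the mask is empty. */
static unsigned int pick_cb_cpu(unsigned int seed, const struct cpumask *mask)
{
	unsigned int cpu, cpu_index, i;

	if (cpumask_test_cpu(seed, mask))
		return seed;			/* previous choice still valid */

	if (!cpumask_weight(mask))		/* the guard this commit adds: */
		return seed;			/* avoids seed % 0 below */

	cpu_index = seed % cpumask_weight(mask);
	cpu = cpumask_first(mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, mask);
	return cpu;
}

The guard matters because the callback cpumask tracks the padata serial cpumask, which can legitimately end up empty (for example, when the configured mask no longer intersects the active CPUs); without the guard, the modulo then divides by zero.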

static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->areq.base.data, padata->info);
}

static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}

static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_assoc(creq, req->assoc, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_assoc(creq, req->assoc, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static void pcrypt_aead_givenc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_givencrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
	int err;
	struct aead_request *areq = &req->areq;
	struct pcrypt_request *preq = aead_request_ctx(areq);
	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(areq);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_givenc;
	padata->serial = pcrypt_aead_giv_serial;

	aead_givcrypt_set_tfm(creq, ctx->child);
	aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				   pcrypt_aead_done, areq);
	aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
				areq->cryptlen, areq->iv);
	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
	aead_givcrypt_set_giv(creq, req->giv, req->seq);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
	int cpu, cpu_index;
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	ictx->tfm_count++;

	cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);

	ctx->cb_cpu = cpumask_first(cpu_active_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);

	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
		+ sizeof(struct aead_givcrypt_request)
		+ crypto_aead_reqsize(cipher);

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
	struct crypto_instance *inst;
	struct pcrypt_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto out_free_inst;

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
						 u32 type, u32 mask)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = pcrypt_alloc_instance(alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_aead_type;

	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

	inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.cra_init = pcrypt_aead_init_tfm;
	inst->alg.cra_exit = pcrypt_aead_exit_tfm;

	inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
	inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
	inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
	inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}
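
For orientation (outside this diff): once the template is registered, kernel code can request a parallelized AEAD by name. A hedged usage sketch follows; the inner algorithm here is only an example:

/* Usage sketch, not from this patch: requesting a pcrypt-wrapped AEAD. */
struct crypto_aead *tfm;

tfm = crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);	/* template or inner algorithm unavailable */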

static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_alloc_aead(tb, algt->type, algt->mask);
	}

	return ERR_PTR(-EINVAL);
}

static void pcrypt_free(struct crypto_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct pcrypt_instance *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct pcrypt_instance, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, new_mask->mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}

static int __pcrypt_init_instance(struct pcrypt_instance *pcrypt,
				  const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	pcrypt->name = name;
	pcrypt->wq = create_workqueue(name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, mask->mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	return ret;
}

static void __pcrypt_deinit_instance(struct pcrypt_instance *pcrypt)
{
	kobject_put(&pcrypt->pinst->kobj);
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.alloc = pcrypt_alloc,
	.free = pcrypt_free,
	.module = THIS_MODULE,
};

static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = __pcrypt_init_instance(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = __pcrypt_init_instance(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	__pcrypt_deinit_instance(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	__pcrypt_deinit_instance(&pencrypt);
	__pcrypt_deinit_instance(&pdecrypt);

	kset_unregister(pcrypt_kset);
	crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");