Blame view
crypto/pcrypt.c
13.2 KB
5068c7a88
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 |
/* * pcrypt - Parallel crypto wrapper. * * Copyright (C) 2009 secunet Security Networks AG * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <crypto/algapi.h> #include <crypto/internal/aead.h> |
a5a22e57f
|
23 |
#include <linux/atomic.h> |
5068c7a88
|
24 25 26 27 |
#include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> |
e15bacbeb
|
28 |
#include <linux/notifier.h> |
a3fb1e330
|
29 |
#include <linux/kobject.h> |
d3f64e46a
|
30 |
#include <linux/cpu.h> |
5068c7a88
|
31 |
#include <crypto/pcrypt.h> |
c57e842ef
|
32 |
/*
 * Per-direction pcrypt state.  Two file-scope instances exist below:
 * one driving parallel encryption, one driving parallel decryption.
 */
struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be
	 * equal to serial cpumask of corresponding padata instance,
	 * so it is updated when padata notifies us about serial
	 * cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on kernel configuration (particularly on
	 * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
	 * cpumask_var_t may be either a pointer to the struct cpumask
	 * or a variable allocated on the stack. Thus we can not safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask which makes possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;	/* parallel-encrypt engine */
static struct padata_pcrypt pdecrypt;	/* parallel-decrypt engine */

/* kset parenting the per-instance padata sysfs objects (see pcrypt_sysfs_add) */
static struct kset           *pcrypt_kset;
5068c7a88
|
60 61 |
struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;	/* handle on the wrapped AEAD alg */
	atomic_t tfm_count;		/* round-robins callback CPUs per tfm */
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;	/* inner (wrapped) AEAD transform */
	unsigned int cb_cpu;		/* CPU for the serial callback */
};

/*
 * Submit @padata to the parallel engine of @pcrypt.
 *
 * *cb_cpu is the caller's preferred serial-callback CPU.  If it is not in
 * the current callback cpumask, a deterministic replacement is chosen from
 * that mask (cpu modulo the mask weight, counted from the first set bit)
 * and written back to *cb_cpu.  If the mask is empty, the caller's choice
 * is kept unchanged.
 *
 * The cpumask is read under rcu_read_lock_bh(); this pairs with the
 * synchronize_rcu_bh() in pcrypt_cpumask_change_notify() below.
 *
 * Returns the result of padata_do_parallel() (0 on successful submission,
 * negative errno otherwise).
 */
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	/* Empty callback mask: fall through with the caller's CPU. */
	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

/* Forward setkey to the wrapped AEAD. */
static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

/* Forward setauthsize to the wrapped AEAD. */
static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

/*
 * padata serial callback: runs in submission order and completes the
 * original request with the status stashed in padata->info.
 */
static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}
5068c7a88
|
120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 |
static void pcrypt_aead_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct pcrypt_request *preq = aead_request_ctx(req); struct padata_priv *padata = pcrypt_request_padata(preq); padata->info = err; req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; padata_do_serial(padata); } static void pcrypt_aead_enc(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); padata->info = crypto_aead_encrypt(req); |
5a1436bee
|
138 |
if (padata->info == -EINPROGRESS) |
5068c7a88
|
139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 |
return; padata_do_serial(padata); } static int pcrypt_aead_encrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_enc; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); |
0496f5606
|
164 |
aead_request_set_ad(creq, req->assoclen); |
5068c7a88
|
165 |
|
e15bacbeb
|
166 |
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); |
83f619f3c
|
167 168 |
if (!err) return -EINPROGRESS; |
5068c7a88
|
169 170 171 172 173 174 175 176 177 178 |
return err; } static void pcrypt_aead_dec(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); padata->info = crypto_aead_decrypt(req); |
5a1436bee
|
179 |
if (padata->info == -EINPROGRESS) |
5068c7a88
|
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 |
return; padata_do_serial(padata); } static int pcrypt_aead_decrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_dec; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); |
0496f5606
|
205 |
aead_request_set_ad(creq, req->assoclen); |
5068c7a88
|
206 |
|
e15bacbeb
|
207 |
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); |
83f619f3c
|
208 209 |
if (!err) return -EINPROGRESS; |
5068c7a88
|
210 211 212 |
return err; } |
0496f5606
|
213 |
/*
 * Transform init: pick a callback CPU for this tfm (round-robin over the
 * online CPUs, driven by the per-instance tfm_count), allocate the inner
 * AEAD and size our request context to hold pcrypt_request + the child's
 * aead_request + the child's own reqsize.
 *
 * NOTE(review): cpu_online_mask is read without get_online_cpus(), so the
 * chosen cb_cpu can race with CPU hotplug — presumably tolerated because
 * pcrypt_do_parallel() re-validates the CPU against the callback mask;
 * confirm.
 */
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}
0496f5606
|
239 |
/* Transform exit: release the inner AEAD allocated in init_tfm. */
static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}
66d948e73
|
245 246 |
static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) |
5068c7a88
|
247 |
{ |
5068c7a88
|
248 249 |
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
66d948e73
|
250 |
return -ENAMETOOLONG; |
5068c7a88
|
251 252 |
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
5068c7a88
|
253 254 255 |
inst->alg.cra_priority = alg->cra_priority + 100; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; |
66d948e73
|
256 |
return 0; |
5068c7a88
|
257 |
} |
0496f5606
|
258 259 |
/*
 * Instantiate "pcrypt(<alg>)" as an AEAD: grab the named inner AEAD,
 * copy its parameters into a new aead_instance and wire up the pcrypt
 * entry points.  On success the instance is registered with the crypto
 * core; on failure everything acquired so far is rolled back via the
 * goto ladder.
 *
 * NOTE(review): @type/@mask and the fetched @algt are only used to
 * validate the attribute list here — presumably the type/mask matching
 * was done by the caller (pcrypt_create); confirm.
 */
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
{
	struct pcrypt_instance_ctx *ctx;
	struct crypto_attr_type *algt;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	/* Instance and its context are a single allocation. */
	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	/* pcrypt always completes asynchronously via padata. */
	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto out_drop_aead;

out:
	return err;

out_drop_aead:
	crypto_drop_aead(&ctx->spawn);
out_free_inst:
	kfree(inst);
	goto out;
}
0496f5606
|
318 |
/*
 * Template "create" hook: dispatch on the requested algorithm type.
 * Only AEAD is supported; anything else yields -EINVAL.
 */
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
	}

	return -EINVAL;
}

/*
 * Template "free" hook: drop the inner AEAD spawn and free the instance.
 *
 * NOTE(review): the instance was allocated as a struct aead_instance in
 * pcrypt_create_aead() but is treated as a crypto_instance here; upstream
 * later reworked this to free via the aead_instance path ("crypto: pcrypt -
 * fix freeing pcrypt instances", CVE-2017-18075).  Verify the pointer
 * offsets are consistent on this kernel before relying on unload.
 */
static void pcrypt_free(struct crypto_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	kfree(inst);
}
e15bacbeb
|
339 340 341 |
/*
 * padata cpumask notifier: when the serial callback cpumask of our padata
 * instance changes, publish a fresh copy of it via RCU.
 *
 * Only PADATA_CPU_SERIAL events are of interest.  The new mask is
 * installed with rcu_assign_pointer() and the old one freed only after
 * synchronize_rcu_bh(), which pairs with the rcu_read_lock_bh() readers
 * in pcrypt_do_parallel().
 *
 * Returns 0 on success or -ENOMEM if the replacement mask cannot be
 * allocated (the old mask then stays in place).
 */
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	/* Wait for all BH readers of the old mask before freeing it. */
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}
a3fb1e330
|
366 367 368 369 370 371 372 373 374 375 376 |
/*
 * Expose the padata instance under the "pcrypt" kset in sysfs and emit a
 * KOBJ_ADD uevent once the kobject is in place.
 *
 * Returns 0 on success or the error from kobject_add().
 */
static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int err;

	pinst->kobj.kset = pcrypt_kset;
	err = kobject_add(&pinst->kobj, NULL, name);
	if (err)
		return err;

	kobject_uevent(&pinst->kobj, KOBJ_ADD);
	return 0;
}
c57e842ef
|
377 378 |
/*
 * Build one padata_pcrypt engine named @name: a single-threaded,
 * CPU-intensive workqueue, a padata instance over all possible CPUs, an
 * initial callback cpumask (possible & online), the cpumask-change
 * notifier and the sysfs entry.
 *
 * Runs under get_online_cpus() so the online mask is stable while the
 * initial callback mask is computed.  On any failure everything acquired
 * so far is unwound through the goto ladder.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}
c57e842ef
|
424 |
/*
 * Tear down one padata_pcrypt engine: release the callback cpumask, stop
 * the padata instance, detach the notifier, then destroy the workqueue
 * and free the instance (reverse of pcrypt_init_padata()).
 */
static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}
5068c7a88
|
434 435 |
static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", |
0496f5606
|
436 |
.create = pcrypt_create, |
5068c7a88
|
437 438 439 440 441 442 |
.free = pcrypt_free, .module = THIS_MODULE, }; static int __init pcrypt_init(void) { |
a3fb1e330
|
443 444 445 446 447 |
int err = -ENOMEM; pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); if (!pcrypt_kset) goto err; |
5068c7a88
|
448 |
|
c57e842ef
|
449 |
err = pcrypt_init_padata(&pencrypt, "pencrypt"); |
4c8791702
|
450 |
if (err) |
a3fb1e330
|
451 |
goto err_unreg_kset; |
4c8791702
|
452 |
|
c57e842ef
|
453 |
err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); |
4c8791702
|
454 |
if (err) |
e15bacbeb
|
455 |
goto err_deinit_pencrypt; |
4c8791702
|
456 |
|
e15bacbeb
|
457 458 |
padata_start(pencrypt.pinst); padata_start(pdecrypt.pinst); |
5068c7a88
|
459 |
|
e15bacbeb
|
460 |
return crypto_register_template(&pcrypt_tmpl); |
5068c7a88
|
461 |
|
e15bacbeb
|
462 |
err_deinit_pencrypt: |
c57e842ef
|
463 |
pcrypt_fini_padata(&pencrypt); |
a3fb1e330
|
464 465 |
err_unreg_kset: kset_unregister(pcrypt_kset); |
5068c7a88
|
466 |
err: |
4c8791702
|
467 |
return err; |
5068c7a88
|
468 469 470 471 |
} static void __exit pcrypt_exit(void) { |
c57e842ef
|
472 473 |
pcrypt_fini_padata(&pencrypt); pcrypt_fini_padata(&pdecrypt); |
5068c7a88
|
474 |
|
a3fb1e330
|
475 |
kset_unregister(pcrypt_kset); |
5068c7a88
|
476 477 478 479 480 481 482 483 484 |
crypto_unregister_template(&pcrypt_tmpl); } module_init(pcrypt_init); module_exit(pcrypt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); MODULE_DESCRIPTION("Parallel crypto wrapper"); |
4943ba16b
|
485 |
MODULE_ALIAS_CRYPTO("pcrypt"); |