Blame view
crypto/ahash.c
16 KB
004a403c2
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 |
/* * Asynchronous Cryptographic Hash operations. * * This is the asynchronous version of hash.c with notification of * completion via a callback. * * Copyright (c) 2008 Loc Ho <lho@amcc.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ |
20036252f
|
15 16 |
#include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> |
75ecb231f
|
17 |
#include <linux/bug.h> |
004a403c2
|
18 19 20 21 22 23 |
#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/seq_file.h> |
6238cbaec
|
24 |
#include <linux/cryptouser.h> |
d8c34b949
|
25 |
#include <linux/compiler.h> |
6238cbaec
|
26 |
#include <net/netlink.h> |
004a403c2
|
27 28 |
#include "internal.h" |
66f6ce5e5
|
29 30 31 32 |
struct ahash_request_priv { crypto_completion_t complete; void *data; u8 *result; |
ef0579b64
|
33 |
u32 flags; |
66f6ce5e5
|
34 35 |
void *ubuf[] CRYPTO_MINALIGN_ATTR; }; |
88056ec34
|
36 37 38 39 40 |
static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) { return container_of(crypto_hash_alg_common(hash), struct ahash_alg, halg); } |
20036252f
|
41 42 43 44 45 46 |
static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int alignmask = walk->alignmask; unsigned int offset = walk->offset; unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); |
75ecb231f
|
47 48 49 50 |
if (walk->flags & CRYPTO_ALG_ASYNC) walk->data = kmap(walk->pg); else walk->data = kmap_atomic(walk->pg); |
20036252f
|
51 |
walk->data += offset; |
23a75eee0
|
52 53 |
if (offset & alignmask) { unsigned int unaligned = alignmask + 1 - (offset & alignmask); |
b516d5140
|
54 |
|
23a75eee0
|
55 56 57 |
if (nbytes > unaligned) nbytes = unaligned; } |
20036252f
|
58 59 60 61 62 63 64 65 66 67 |
walk->entrylen -= nbytes; return nbytes; } static int hash_walk_new_entry(struct crypto_hash_walk *walk) { struct scatterlist *sg; sg = walk->sg; |
20036252f
|
68 |
walk->offset = sg->offset; |
13f4bb78c
|
69 70 |
walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); walk->offset = offset_in_page(walk->offset); |
20036252f
|
71 72 73 74 75 76 77 78 79 80 81 82 |
walk->entrylen = sg->length; if (walk->entrylen > walk->total) walk->entrylen = walk->total; walk->total -= walk->entrylen; return hash_walk_next(walk); } int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { unsigned int alignmask = walk->alignmask; |
20036252f
|
83 84 |
walk->data -= walk->offset; |
3c5d7703b
|
85 86 |
if (walk->entrylen && (walk->offset & alignmask) && !err) { unsigned int nbytes; |
20036252f
|
87 |
|
3c5d7703b
|
88 89 90 |
walk->offset = ALIGN(walk->offset, alignmask + 1); nbytes = min(walk->entrylen, (unsigned int)(PAGE_SIZE - walk->offset)); |
900a081f6
|
91 |
if (nbytes) { |
3c5d7703b
|
92 |
walk->entrylen -= nbytes; |
900a081f6
|
93 94 95 |
walk->data += walk->offset; return nbytes; } |
20036252f
|
96 |
} |
75ecb231f
|
97 98 99 100 101 102 103 104 105 106 |
if (walk->flags & CRYPTO_ALG_ASYNC) kunmap(walk->pg); else { kunmap_atomic(walk->data); /* * The may sleep test only makes sense for sync users. * Async users don't need to sleep here anyway. */ crypto_yield(walk->flags); } |
20036252f
|
107 108 109 |
if (err) return err; |
3c5d7703b
|
110 |
if (walk->entrylen) { |
d315a0e09
|
111 112 |
walk->offset = 0; walk->pg++; |
20036252f
|
113 |
return hash_walk_next(walk); |
d315a0e09
|
114 |
} |
20036252f
|
115 116 117 |
if (!walk->total) return 0; |
5be4d4c94
|
118 |
walk->sg = sg_next(walk->sg); |
20036252f
|
119 120 121 122 123 124 125 126 127 |
return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_hash_walk_done); int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk) { walk->total = req->nbytes; |
6d9529c58
|
128 129 |
if (!walk->total) { walk->entrylen = 0; |
20036252f
|
130 |
return 0; |
6d9529c58
|
131 |
} |
20036252f
|
132 133 134 |
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); walk->sg = req->src; |
75ecb231f
|
135 |
walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; |
20036252f
|
136 137 138 139 |
return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_hash_walk_first); |
75ecb231f
|
140 141 142 143 |
int crypto_ahash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk) { walk->total = req->nbytes; |
6d9529c58
|
144 145 |
if (!walk->total) { walk->entrylen = 0; |
75ecb231f
|
146 |
return 0; |
6d9529c58
|
147 |
} |
75ecb231f
|
148 149 150 151 152 153 154 155 156 157 158 |
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); walk->sg = req->src; walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; walk->flags |= CRYPTO_ALG_ASYNC; BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC); return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_ahash_walk_first); |
004a403c2
|
159 160 161 |
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { |
004a403c2
|
162 163 164 165 166 167 |
unsigned long alignmask = crypto_ahash_alignmask(tfm); int ret; u8 *buffer, *alignbuffer; unsigned long absize; absize = keylen + alignmask; |
093900c2b
|
168 |
buffer = kmalloc(absize, GFP_KERNEL); |
004a403c2
|
169 170 171 172 173 |
if (!buffer) return -ENOMEM; alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); |
a70c52252
|
174 |
ret = tfm->setkey(tfm, alignbuffer, keylen); |
8c32c516e
|
175 |
kzfree(buffer); |
004a403c2
|
176 177 |
return ret; } |
dc410d2d8
|
178 179 180 181 182 183 184 185 186 187 188 189 190 191 |
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { return -ENOSYS; } static void ahash_set_needkey(struct crypto_ahash *tfm) { const struct hash_alg_common *alg = crypto_hash_alg_common(tfm); if (tfm->setkey != ahash_nosetkey && !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); } |
66f6ce5e5
|
192 |
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
004a403c2
|
193 194 |
unsigned int keylen) { |
004a403c2
|
195 |
unsigned long alignmask = crypto_ahash_alignmask(tfm); |
9fa68f620
|
196 |
int err; |
004a403c2
|
197 198 |
if ((unsigned long)key & alignmask) |
9fa68f620
|
199 200 201 |
err = ahash_setkey_unaligned(tfm, key, keylen); else err = tfm->setkey(tfm, key, keylen); |
dc410d2d8
|
202 203 |
if (unlikely(err)) { ahash_set_needkey(tfm); |
9fa68f620
|
204 |
return err; |
dc410d2d8
|
205 |
} |
004a403c2
|
206 |
|
9fa68f620
|
207 208 |
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; |
004a403c2
|
209 |
} |
66f6ce5e5
|
210 |
EXPORT_SYMBOL_GPL(crypto_ahash_setkey); |
004a403c2
|
211 |
|
/*
 * Size of a result bounce buffer of @len bytes that can always be
 * aligned to @mask on top of the ctx-aligned priv allocation.
 */
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
1ffc9fbd1
|
217 |
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) |
66f6ce5e5
|
218 219 220 221 222 |
{ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); unsigned long alignmask = crypto_ahash_alignmask(tfm); unsigned int ds = crypto_ahash_digestsize(tfm); struct ahash_request_priv *priv; |
66f6ce5e5
|
223 224 225 |
priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
5befbd5a7
|
226 |
GFP_KERNEL : GFP_ATOMIC); |
66f6ce5e5
|
227 228 |
if (!priv) return -ENOMEM; |
ab6bf4e5e
|
229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 |
/* * WARNING: Voodoo programming below! * * The code below is obscure and hard to understand, thus explanation * is necessary. See include/crypto/hash.h and include/linux/crypto.h * to understand the layout of structures used here! * * The code here will replace portions of the ORIGINAL request with * pointers to new code and buffers so the hashing operation can store * the result in aligned buffer. We will call the modified request * an ADJUSTED request. * * The newly mangled request will look as such: * * req { * .result = ADJUSTED[new aligned buffer] * .base.complete = ADJUSTED[pointer to completion function] * .base.data = ADJUSTED[*req (pointer to self)] * .priv = ADJUSTED[new priv] { * .result = ORIGINAL(result) * .complete = ORIGINAL(base.complete) * .data = ORIGINAL(base.data) * } */ |
66f6ce5e5
|
253 254 255 |
priv->result = req->result; priv->complete = req->base.complete; priv->data = req->base.data; |
ef0579b64
|
256 |
priv->flags = req->base.flags; |
ab6bf4e5e
|
257 258 259 260 261 |
/* * WARNING: We do not backup req->priv here! The req->priv * is for internal use of the Crypto API and the * user must _NOT_ _EVER_ depend on it's content! */ |
66f6ce5e5
|
262 263 |
req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); |
1ffc9fbd1
|
264 |
req->base.complete = cplt; |
66f6ce5e5
|
265 266 |
req->base.data = req; req->priv = priv; |
1ffc9fbd1
|
267 268 |
return 0; } |
ef0579b64
|
269 |
static void ahash_restore_req(struct ahash_request *req, int err) |
1ffc9fbd1
|
270 271 |
{ struct ahash_request_priv *priv = req->priv; |
ef0579b64
|
272 273 274 |
if (!err) memcpy(priv->result, req->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); |
1ffc9fbd1
|
275 276 |
/* Restore the original crypto request. */ req->result = priv->result; |
ef0579b64
|
277 278 279 |
ahash_request_set_callback(req, priv->flags, priv->complete, priv->data); |
1ffc9fbd1
|
280 281 282 283 284 |
req->priv = NULL; /* Free the req->priv.priv from the ADJUSTED request. */ kzfree(priv); } |
ef0579b64
|
285 |
static void ahash_notify_einprogress(struct ahash_request *req) |
1ffc9fbd1
|
286 287 |
{ struct ahash_request_priv *priv = req->priv; |
ef0579b64
|
288 |
struct crypto_async_request oreq; |
1ffc9fbd1
|
289 |
|
ef0579b64
|
290 |
oreq.data = priv->data; |
1ffc9fbd1
|
291 |
|
ef0579b64
|
292 |
priv->complete(&oreq, -EINPROGRESS); |
1ffc9fbd1
|
293 294 295 296 297 |
} static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) { struct ahash_request *areq = req->data; |
ef0579b64
|
298 299 300 301 |
if (err == -EINPROGRESS) { ahash_notify_einprogress(areq); return; } |
1ffc9fbd1
|
302 303 304 305 306 307 308 309 310 311 |
/* * Restore the original request, see ahash_op_unaligned() for what * goes where. * * The "struct ahash_request *req" here is in fact the "req.base" * from the ADJUSTED request from ahash_op_unaligned(), thus as it * is a pointer to self, it is also the ADJUSTED "req" . */ /* First copy req->result into req->priv.result */ |
ef0579b64
|
312 |
ahash_restore_req(areq, err); |
1ffc9fbd1
|
313 314 315 316 317 318 319 320 321 322 323 324 325 |
/* Complete the ORIGINAL request. */ areq->base.complete(&areq->base, err); } static int ahash_op_unaligned(struct ahash_request *req, int (*op)(struct ahash_request *)) { int err; err = ahash_save_req(req, ahash_op_unaligned_done); if (err) return err; |
66f6ce5e5
|
326 |
err = op(req); |
4e5b0ad58
|
327 |
if (err == -EINPROGRESS || err == -EBUSY) |
ef0579b64
|
328 329 330 |
return err; ahash_restore_req(req, err); |
66f6ce5e5
|
331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 |
return err; } static int crypto_ahash_op(struct ahash_request *req, int (*op)(struct ahash_request *)) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); unsigned long alignmask = crypto_ahash_alignmask(tfm); if ((unsigned long)req->result & alignmask) return ahash_op_unaligned(req, op); return op(req); } int crypto_ahash_final(struct ahash_request *req) { return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); } EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { |
9fa68f620
|
361 362 363 364 365 366 |
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return crypto_ahash_op(req, tfm->digest); |
66f6ce5e5
|
367 368 |
} EXPORT_SYMBOL_GPL(crypto_ahash_digest); |
ef0579b64
|
369 |
static void ahash_def_finup_done2(struct crypto_async_request *req, int err) |
66f6ce5e5
|
370 |
{ |
ef0579b64
|
371 |
struct ahash_request *areq = req->data; |
66f6ce5e5
|
372 373 374 |
if (err == -EINPROGRESS) return; |
ef0579b64
|
375 |
ahash_restore_req(areq, err); |
66f6ce5e5
|
376 |
|
d4a7a0fbe
|
377 |
areq->base.complete(&areq->base, err); |
66f6ce5e5
|
378 379 380 381 382 383 384 385 |
} static int ahash_def_finup_finish1(struct ahash_request *req, int err) { if (err) goto out; req->base.complete = ahash_def_finup_done2; |
ef0579b64
|
386 |
|
66f6ce5e5
|
387 |
err = crypto_ahash_reqtfm(req)->final(req); |
4e5b0ad58
|
388 |
if (err == -EINPROGRESS || err == -EBUSY) |
ef0579b64
|
389 |
return err; |
66f6ce5e5
|
390 391 |
out: |
ef0579b64
|
392 |
ahash_restore_req(req, err); |
66f6ce5e5
|
393 394 395 396 397 398 |
return err; } static void ahash_def_finup_done1(struct crypto_async_request *req, int err) { struct ahash_request *areq = req->data; |
66f6ce5e5
|
399 |
|
ef0579b64
|
400 401 402 403 404 405 |
if (err == -EINPROGRESS) { ahash_notify_einprogress(areq); return; } areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
66f6ce5e5
|
406 |
err = ahash_def_finup_finish1(areq, err); |
ef0579b64
|
407 408 |
if (areq->priv) return; |
66f6ce5e5
|
409 |
|
d4a7a0fbe
|
410 |
areq->base.complete(&areq->base, err); |
66f6ce5e5
|
411 412 413 414 415 |
} static int ahash_def_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
d4a7a0fbe
|
416 |
int err; |
66f6ce5e5
|
417 |
|
d4a7a0fbe
|
418 419 420 |
err = ahash_save_req(req, ahash_def_finup_done1); if (err) return err; |
66f6ce5e5
|
421 |
|
d4a7a0fbe
|
422 |
err = tfm->update(req); |
4e5b0ad58
|
423 |
if (err == -EINPROGRESS || err == -EBUSY) |
ef0579b64
|
424 |
return err; |
d4a7a0fbe
|
425 |
return ahash_def_finup_finish1(req, err); |
66f6ce5e5
|
426 |
} |
88056ec34
|
427 428 429 430 |
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); |
88056ec34
|
431 |
|
66f6ce5e5
|
432 |
hash->setkey = ahash_nosetkey; |
66f6ce5e5
|
433 |
|
88056ec34
|
434 435 |
if (tfm->__crt_alg->cra_type != &crypto_ahash_type) return crypto_init_shash_ops_async(tfm); |
88056ec34
|
436 437 |
hash->init = alg->init; hash->update = alg->update; |
66f6ce5e5
|
438 439 |
hash->final = alg->final; hash->finup = alg->finup ?: ahash_def_finup; |
88056ec34
|
440 |
hash->digest = alg->digest; |
6f221f7e8
|
441 442 |
hash->export = alg->export; hash->import = alg->import; |
66f6ce5e5
|
443 |
|
a5596d633
|
444 |
if (alg->setkey) { |
66f6ce5e5
|
445 |
hash->setkey = alg->setkey; |
dc410d2d8
|
446 |
ahash_set_needkey(hash); |
a5596d633
|
447 |
} |
88056ec34
|
448 449 450 451 452 453 |
return 0; } static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) { |
2495cf25f
|
454 455 |
if (alg->cra_type != &crypto_ahash_type) return sizeof(struct crypto_shash *); |
88056ec34
|
456 |
|
2495cf25f
|
457 |
return crypto_alg_extsize(alg); |
88056ec34
|
458 |
} |
3acc84739
|
459 |
#ifdef CONFIG_NET |
6238cbaec
|
460 461 462 |
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_hash rhash; |
9a5467bf7
|
463 |
strncpy(rhash.type, "ahash", sizeof(rhash.type)); |
6238cbaec
|
464 465 466 |
rhash.blocksize = alg->cra_blocksize; rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize; |
6662df33f
|
467 468 469 |
if (nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(struct crypto_report_hash), &rhash)) goto nla_put_failure; |
6238cbaec
|
470 471 472 473 474 |
return 0; nla_put_failure: return -EMSGSIZE; } |
3acc84739
|
475 476 477 478 479 480 |
#else static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) { return -ENOSYS; } #endif |
6238cbaec
|
481 |
|
004a403c2
|
482 |
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) |
d8c34b949
|
483 |
__maybe_unused; |
004a403c2
|
484 485 486 487 488 489 490 491 492 |
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : ahash "); seq_printf(m, "async : %s ", alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no"); seq_printf(m, "blocksize : %u ", alg->cra_blocksize); |
88056ec34
|
493 494 495 |
seq_printf(m, "digestsize : %u ", __crypto_hash_alg_common(alg)->digestsize); |
004a403c2
|
496 497 498 |
} const struct crypto_type crypto_ahash_type = { |
88056ec34
|
499 500 |
.extsize = crypto_ahash_extsize, .init_tfm = crypto_ahash_init_tfm, |
004a403c2
|
501 502 503 |
#ifdef CONFIG_PROC_FS .show = crypto_ahash_show, #endif |
6238cbaec
|
504 |
.report = crypto_ahash_report, |
88056ec34
|
505 506 507 508 |
.maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AHASH, .tfmsize = offsetof(struct crypto_ahash, base), |
004a403c2
|
509 510 |
}; EXPORT_SYMBOL_GPL(crypto_ahash_type); |
88056ec34
|
511 512 513 514 515 516 |
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, u32 mask) { return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask); } EXPORT_SYMBOL_GPL(crypto_alloc_ahash); |
8d18e34c1
|
517 518 519 520 521 |
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) { return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask); } EXPORT_SYMBOL_GPL(crypto_has_ahash); |
01c2dece4
|
522 523 524 525 526 |
static int ahash_prepare_alg(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; if (alg->halg.digestsize > PAGE_SIZE / 8 || |
8996eafdc
|
527 528 |
alg->halg.statesize > PAGE_SIZE / 8 || alg->halg.statesize == 0) |
01c2dece4
|
529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 |
return -EINVAL; base->cra_type = &crypto_ahash_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; return 0; } int crypto_register_ahash(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; int err; err = ahash_prepare_alg(alg); if (err) return err; return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_ahash); int crypto_unregister_ahash(struct ahash_alg *alg) { return crypto_unregister_alg(&alg->halg.base); } EXPORT_SYMBOL_GPL(crypto_unregister_ahash); |
/*
 * Register an array of ahash algorithms, rolling back the ones
 * already registered if any registration fails.
 */
int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

/* Unregister an array of ahash algorithms in reverse order. */
void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
01c2dece4
|
584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 |
int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst) { int err; err = ahash_prepare_alg(&inst->alg); if (err) return err; return crypto_register_instance(tmpl, ahash_crypto_instance(inst)); } EXPORT_SYMBOL_GPL(ahash_register_instance); void ahash_free_instance(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(ahash_instance(inst)); } EXPORT_SYMBOL_GPL(ahash_free_instance); int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, struct hash_alg_common *alg, struct crypto_instance *inst) { return crypto_init_spawn2(&spawn->base, &alg->base, inst, &crypto_ahash_type); } EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn); struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) { struct crypto_alg *alg; alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask); return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg); } EXPORT_SYMBOL_GPL(ahash_attr_alg); |
cd6ed77ad
|
621 622 623 624 625 626 627 628 629 630 |
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; if (alg->cra_type != &crypto_ahash_type) return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); return __crypto_ahash_alg(alg)->setkey != NULL; } EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); |
004a403c2
|
631 632 |
MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); |