Blame view
crypto/ahash.c
15.7 KB
2874c5fd2
|
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
004a403c2
|
2 3 4 5 6 7 8 |
/* * Asynchronous Cryptographic Hash operations. * * This is the asynchronous version of hash.c with notification of * completion via a callback. * * Copyright (c) 2008 Loc Ho <lho@amcc.com> |
004a403c2
|
9 |
*/ |
20036252f
|
10 11 |
#include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> |
004a403c2
|
12 13 14 15 16 17 |
#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/seq_file.h> |
6238cbaec
|
18 |
#include <linux/cryptouser.h> |
d8c34b949
|
19 |
#include <linux/compiler.h> |
6238cbaec
|
20 |
#include <net/netlink.h> |
004a403c2
|
21 22 |
#include "internal.h" |
6d1b41fce
|
23 |
static const struct crypto_type crypto_ahash_type; |
66f6ce5e5
|
24 25 26 27 |
struct ahash_request_priv { crypto_completion_t complete; void *data; u8 *result; |
ef0579b64
|
28 |
u32 flags; |
66f6ce5e5
|
29 30 |
void *ubuf[] CRYPTO_MINALIGN_ATTR; }; |
88056ec34
|
31 32 33 34 35 |
static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) { return container_of(crypto_hash_alg_common(hash), struct ahash_alg, halg); } |
20036252f
|
36 37 38 39 40 41 |
static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int alignmask = walk->alignmask; unsigned int offset = walk->offset; unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); |
8afa25aa8
|
42 |
walk->data = kmap_atomic(walk->pg); |
20036252f
|
43 |
walk->data += offset; |
23a75eee0
|
44 45 |
if (offset & alignmask) { unsigned int unaligned = alignmask + 1 - (offset & alignmask); |
b516d5140
|
46 |
|
23a75eee0
|
47 48 49 |
if (nbytes > unaligned) nbytes = unaligned; } |
20036252f
|
50 51 52 53 54 55 56 57 58 59 |
walk->entrylen -= nbytes; return nbytes; } static int hash_walk_new_entry(struct crypto_hash_walk *walk) { struct scatterlist *sg; sg = walk->sg; |
20036252f
|
60 |
walk->offset = sg->offset; |
13f4bb78c
|
61 62 |
walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); walk->offset = offset_in_page(walk->offset); |
20036252f
|
63 64 65 66 67 68 69 70 71 72 73 74 |
walk->entrylen = sg->length; if (walk->entrylen > walk->total) walk->entrylen = walk->total; walk->total -= walk->entrylen; return hash_walk_next(walk); } int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { unsigned int alignmask = walk->alignmask; |
20036252f
|
75 76 |
walk->data -= walk->offset; |
77568e535
|
77 78 |
if (walk->entrylen && (walk->offset & alignmask) && !err) { unsigned int nbytes; |
20036252f
|
79 |
|
77568e535
|
80 81 82 |
walk->offset = ALIGN(walk->offset, alignmask + 1); nbytes = min(walk->entrylen, (unsigned int)(PAGE_SIZE - walk->offset)); |
900a081f6
|
83 |
if (nbytes) { |
77568e535
|
84 |
walk->entrylen -= nbytes; |
900a081f6
|
85 86 87 |
walk->data += walk->offset; return nbytes; } |
20036252f
|
88 |
} |
8afa25aa8
|
89 90 |
kunmap_atomic(walk->data); crypto_yield(walk->flags); |
20036252f
|
91 92 93 |
if (err) return err; |
77568e535
|
94 |
if (walk->entrylen) { |
d315a0e09
|
95 96 |
walk->offset = 0; walk->pg++; |
20036252f
|
97 |
return hash_walk_next(walk); |
d315a0e09
|
98 |
} |
20036252f
|
99 100 101 |
if (!walk->total) return 0; |
5be4d4c94
|
102 |
walk->sg = sg_next(walk->sg); |
20036252f
|
103 104 105 106 107 108 109 110 111 |
return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_hash_walk_done); int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk) { walk->total = req->nbytes; |
6d9529c58
|
112 113 |
if (!walk->total) { walk->entrylen = 0; |
20036252f
|
114 |
return 0; |
6d9529c58
|
115 |
} |
20036252f
|
116 117 118 |
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); walk->sg = req->src; |
8afa25aa8
|
119 |
walk->flags = req->base.flags; |
20036252f
|
120 121 122 123 |
return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_hash_walk_first); |
004a403c2
|
124 125 126 |
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { |
004a403c2
|
127 128 129 130 131 132 |
unsigned long alignmask = crypto_ahash_alignmask(tfm); int ret; u8 *buffer, *alignbuffer; unsigned long absize; absize = keylen + alignmask; |
093900c2b
|
133 |
buffer = kmalloc(absize, GFP_KERNEL); |
004a403c2
|
134 135 136 137 138 |
if (!buffer) return -ENOMEM; alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); |
a70c52252
|
139 |
ret = tfm->setkey(tfm, alignbuffer, keylen); |
453431a54
|
140 |
kfree_sensitive(buffer); |
004a403c2
|
141 142 |
return ret; } |
ba7d7433a
|
143 144 145 146 147 148 149 150 151 152 153 154 155 156 |
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { return -ENOSYS; } static void ahash_set_needkey(struct crypto_ahash *tfm) { const struct hash_alg_common *alg = crypto_hash_alg_common(tfm); if (tfm->setkey != ahash_nosetkey && !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); } |
66f6ce5e5
|
157 |
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
004a403c2
|
158 159 |
unsigned int keylen) { |
004a403c2
|
160 |
unsigned long alignmask = crypto_ahash_alignmask(tfm); |
9fa68f620
|
161 |
int err; |
004a403c2
|
162 163 |
if ((unsigned long)key & alignmask) |
9fa68f620
|
164 165 166 |
err = ahash_setkey_unaligned(tfm, key, keylen); else err = tfm->setkey(tfm, key, keylen); |
ba7d7433a
|
167 168 |
if (unlikely(err)) { ahash_set_needkey(tfm); |
9fa68f620
|
169 |
return err; |
ba7d7433a
|
170 |
} |
004a403c2
|
171 |
|
9fa68f620
|
172 173 |
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; |
004a403c2
|
174 |
} |
66f6ce5e5
|
175 |
EXPORT_SYMBOL_GPL(crypto_ahash_setkey); |
004a403c2
|
176 |
|
66f6ce5e5
|
177 178 179 180 181 |
static inline unsigned int ahash_align_buffer_size(unsigned len, unsigned long mask) { return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); } |
1ffc9fbd1
|
182 |
/*
 * Redirect @req to an internal aligned result buffer.
 *
 * The original completion callback, callback data, result pointer and
 * flags are saved in a freshly allocated ahash_request_priv, and the
 * request is rewritten in place: its result points at the aligned
 * scratch buffer, its completion callback becomes @cplt, and its
 * base.data points back at the request itself so the completion handler
 * can find it.  ahash_restore_req() undoes all of this.
 *
 * Note: req->priv is owned by the API; any previous content is
 * deliberately not preserved.
 */
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/* Stash the caller's view of the request. */
	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/* Point the request at our aligned buffer and callback. */
	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}
ef0579b64
|
234 |
static void ahash_restore_req(struct ahash_request *req, int err) |
1ffc9fbd1
|
235 236 |
{ struct ahash_request_priv *priv = req->priv; |
ef0579b64
|
237 238 239 |
if (!err) memcpy(priv->result, req->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); |
1ffc9fbd1
|
240 241 |
/* Restore the original crypto request. */ req->result = priv->result; |
ef0579b64
|
242 243 244 |
ahash_request_set_callback(req, priv->flags, priv->complete, priv->data); |
1ffc9fbd1
|
245 246 247 |
req->priv = NULL; /* Free the req->priv.priv from the ADJUSTED request. */ |
453431a54
|
248 |
kfree_sensitive(priv); |
1ffc9fbd1
|
249 |
} |
ef0579b64
|
250 |
static void ahash_notify_einprogress(struct ahash_request *req) |
1ffc9fbd1
|
251 252 |
{ struct ahash_request_priv *priv = req->priv; |
ef0579b64
|
253 |
struct crypto_async_request oreq; |
1ffc9fbd1
|
254 |
|
ef0579b64
|
255 |
oreq.data = priv->data; |
1ffc9fbd1
|
256 |
|
ef0579b64
|
257 |
priv->complete(&oreq, -EINPROGRESS); |
1ffc9fbd1
|
258 259 260 261 262 |
} static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) { struct ahash_request *areq = req->data; |
ef0579b64
|
263 264 265 266 |
if (err == -EINPROGRESS) { ahash_notify_einprogress(areq); return; } |
1ffc9fbd1
|
267 268 269 270 271 272 273 274 275 276 |
/* * Restore the original request, see ahash_op_unaligned() for what * goes where. * * The "struct ahash_request *req" here is in fact the "req.base" * from the ADJUSTED request from ahash_op_unaligned(), thus as it * is a pointer to self, it is also the ADJUSTED "req" . */ /* First copy req->result into req->priv.result */ |
ef0579b64
|
277 |
ahash_restore_req(areq, err); |
1ffc9fbd1
|
278 279 280 281 282 283 284 285 286 287 288 289 290 |
/* Complete the ORIGINAL request. */ areq->base.complete(&areq->base, err); } static int ahash_op_unaligned(struct ahash_request *req, int (*op)(struct ahash_request *)) { int err; err = ahash_save_req(req, ahash_op_unaligned_done); if (err) return err; |
66f6ce5e5
|
291 |
err = op(req); |
4e5b0ad58
|
292 |
if (err == -EINPROGRESS || err == -EBUSY) |
ef0579b64
|
293 294 295 |
return err; ahash_restore_req(req, err); |
66f6ce5e5
|
296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 |
return err; } static int crypto_ahash_op(struct ahash_request *req, int (*op)(struct ahash_request *)) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); unsigned long alignmask = crypto_ahash_alignmask(tfm); if ((unsigned long)req->result & alignmask) return ahash_op_unaligned(req, op); return op(req); } int crypto_ahash_final(struct ahash_request *req) { |
f7d76e05d
|
314 315 316 |
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct crypto_alg *alg = tfm->base.__crt_alg; unsigned int nbytes = req->nbytes; |
cac5818c2
|
317 |
int ret; |
f7d76e05d
|
318 |
crypto_stats_get(alg); |
cac5818c2
|
319 |
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); |
f7d76e05d
|
320 |
crypto_stats_ahash_final(nbytes, ret, alg); |
cac5818c2
|
321 |
return ret; |
66f6ce5e5
|
322 323 324 325 326 |
} EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { |
f7d76e05d
|
327 328 329 |
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct crypto_alg *alg = tfm->base.__crt_alg; unsigned int nbytes = req->nbytes; |
cac5818c2
|
330 |
int ret; |
f7d76e05d
|
331 |
crypto_stats_get(alg); |
cac5818c2
|
332 |
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); |
f7d76e05d
|
333 |
crypto_stats_ahash_final(nbytes, ret, alg); |
cac5818c2
|
334 |
return ret; |
66f6ce5e5
|
335 336 337 338 339 |
} EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { |
9fa68f620
|
340 |
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
f7d76e05d
|
341 342 |
struct crypto_alg *alg = tfm->base.__crt_alg; unsigned int nbytes = req->nbytes; |
cac5818c2
|
343 |
int ret; |
9fa68f620
|
344 |
|
f7d76e05d
|
345 |
crypto_stats_get(alg); |
9fa68f620
|
346 |
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
cac5818c2
|
347 348 349 |
ret = -ENOKEY; else ret = crypto_ahash_op(req, tfm->digest); |
f7d76e05d
|
350 |
crypto_stats_ahash_final(nbytes, ret, alg); |
cac5818c2
|
351 |
return ret; |
66f6ce5e5
|
352 353 |
} EXPORT_SYMBOL_GPL(crypto_ahash_digest); |
ef0579b64
|
354 |
static void ahash_def_finup_done2(struct crypto_async_request *req, int err) |
66f6ce5e5
|
355 |
{ |
ef0579b64
|
356 |
struct ahash_request *areq = req->data; |
66f6ce5e5
|
357 358 359 |
if (err == -EINPROGRESS) return; |
ef0579b64
|
360 |
ahash_restore_req(areq, err); |
66f6ce5e5
|
361 |
|
d4a7a0fbe
|
362 |
areq->base.complete(&areq->base, err); |
66f6ce5e5
|
363 364 365 366 367 368 369 370 |
} static int ahash_def_finup_finish1(struct ahash_request *req, int err) { if (err) goto out; req->base.complete = ahash_def_finup_done2; |
ef0579b64
|
371 |
|
66f6ce5e5
|
372 |
err = crypto_ahash_reqtfm(req)->final(req); |
4e5b0ad58
|
373 |
if (err == -EINPROGRESS || err == -EBUSY) |
ef0579b64
|
374 |
return err; |
66f6ce5e5
|
375 376 |
out: |
ef0579b64
|
377 |
ahash_restore_req(req, err); |
66f6ce5e5
|
378 379 380 381 382 383 |
return err; } static void ahash_def_finup_done1(struct crypto_async_request *req, int err) { struct ahash_request *areq = req->data; |
66f6ce5e5
|
384 |
|
ef0579b64
|
385 386 387 388 389 390 |
if (err == -EINPROGRESS) { ahash_notify_einprogress(areq); return; } areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
66f6ce5e5
|
391 |
err = ahash_def_finup_finish1(areq, err); |
ef0579b64
|
392 393 |
if (areq->priv) return; |
66f6ce5e5
|
394 |
|
d4a7a0fbe
|
395 |
areq->base.complete(&areq->base, err); |
66f6ce5e5
|
396 397 398 399 400 |
} static int ahash_def_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
d4a7a0fbe
|
401 |
int err; |
66f6ce5e5
|
402 |
|
d4a7a0fbe
|
403 404 405 |
err = ahash_save_req(req, ahash_def_finup_done1); if (err) return err; |
66f6ce5e5
|
406 |
|
d4a7a0fbe
|
407 |
err = tfm->update(req); |
4e5b0ad58
|
408 |
if (err == -EINPROGRESS || err == -EBUSY) |
ef0579b64
|
409 |
return err; |
d4a7a0fbe
|
410 |
return ahash_def_finup_finish1(req, err); |
66f6ce5e5
|
411 |
} |
e73d340db
|
412 413 414 415 416 417 418 |
static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); alg->exit_tfm(hash); } |
88056ec34
|
419 420 421 422 |
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); |
88056ec34
|
423 |
|
66f6ce5e5
|
424 |
hash->setkey = ahash_nosetkey; |
66f6ce5e5
|
425 |
|
88056ec34
|
426 427 |
if (tfm->__crt_alg->cra_type != &crypto_ahash_type) return crypto_init_shash_ops_async(tfm); |
88056ec34
|
428 429 |
hash->init = alg->init; hash->update = alg->update; |
66f6ce5e5
|
430 431 |
hash->final = alg->final; hash->finup = alg->finup ?: ahash_def_finup; |
88056ec34
|
432 |
hash->digest = alg->digest; |
6f221f7e8
|
433 434 |
hash->export = alg->export; hash->import = alg->import; |
66f6ce5e5
|
435 |
|
a5596d633
|
436 |
if (alg->setkey) { |
66f6ce5e5
|
437 |
hash->setkey = alg->setkey; |
ba7d7433a
|
438 |
ahash_set_needkey(hash); |
a5596d633
|
439 |
} |
88056ec34
|
440 |
|
e73d340db
|
441 442 443 444 |
if (alg->exit_tfm) tfm->exit = crypto_ahash_exit_tfm; return alg->init_tfm ? alg->init_tfm(hash) : 0; |
88056ec34
|
445 446 447 448 |
} static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) { |
2495cf25f
|
449 450 |
if (alg->cra_type != &crypto_ahash_type) return sizeof(struct crypto_shash *); |
88056ec34
|
451 |
|
2495cf25f
|
452 |
return crypto_alg_extsize(alg); |
88056ec34
|
453 |
} |
48fb3e578
|
454 455 456 |
static void crypto_ahash_free_instance(struct crypto_instance *inst) { struct ahash_instance *ahash = ahash_instance(inst); |
48fb3e578
|
457 458 |
ahash->free(ahash); } |
3acc84739
|
459 |
#ifdef CONFIG_NET |
6238cbaec
|
460 461 462 |
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_hash rhash; |
37db69e0b
|
463 464 465 |
memset(&rhash, 0, sizeof(rhash)); strscpy(rhash.type, "ahash", sizeof(rhash.type)); |
6238cbaec
|
466 467 468 |
rhash.blocksize = alg->cra_blocksize; rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize; |
37db69e0b
|
469 |
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash); |
6238cbaec
|
470 |
} |
3acc84739
|
471 472 473 474 475 476 |
#else static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) { return -ENOSYS; } #endif |
6238cbaec
|
477 |
|
004a403c2
|
478 |
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) |
d8c34b949
|
479 |
__maybe_unused; |
004a403c2
|
480 481 482 483 484 485 486 487 488 |
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : ahash "); seq_printf(m, "async : %s ", alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no"); seq_printf(m, "blocksize : %u ", alg->cra_blocksize); |
88056ec34
|
489 490 491 |
seq_printf(m, "digestsize : %u ", __crypto_hash_alg_common(alg)->digestsize); |
004a403c2
|
492 |
} |
6d1b41fce
|
493 |
static const struct crypto_type crypto_ahash_type = { |
88056ec34
|
494 495 |
.extsize = crypto_ahash_extsize, .init_tfm = crypto_ahash_init_tfm, |
48fb3e578
|
496 |
.free = crypto_ahash_free_instance, |
004a403c2
|
497 498 499 |
#ifdef CONFIG_PROC_FS .show = crypto_ahash_show, #endif |
6238cbaec
|
500 |
.report = crypto_ahash_report, |
88056ec34
|
501 502 503 504 |
.maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AHASH, .tfmsize = offsetof(struct crypto_ahash, base), |
004a403c2
|
505 |
}; |
004a403c2
|
506 |
|
84a9c938e
|
507 508 509 510 511 512 513 514 |
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask) { spawn->base.frontend = &crypto_ahash_type; return crypto_grab_spawn(&spawn->base, inst, name, type, mask); } EXPORT_SYMBOL_GPL(crypto_grab_ahash); |
88056ec34
|
515 516 517 518 519 520 |
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, u32 mask) { return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask); } EXPORT_SYMBOL_GPL(crypto_alloc_ahash); |
8d18e34c1
|
521 522 523 524 525 |
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) { return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask); } EXPORT_SYMBOL_GPL(crypto_has_ahash); |
01c2dece4
|
526 527 528 |
static int ahash_prepare_alg(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; |
b68a7ec1e
|
529 530 |
if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE || alg->halg.statesize > HASH_MAX_STATESIZE || |
8996eafdc
|
531 |
alg->halg.statesize == 0) |
01c2dece4
|
532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 |
return -EINVAL; base->cra_type = &crypto_ahash_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; return 0; } int crypto_register_ahash(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; int err; err = ahash_prepare_alg(alg); if (err) return err; return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_ahash); |
c6d633a92
|
553 |
void crypto_unregister_ahash(struct ahash_alg *alg) |
01c2dece4
|
554 |
{ |
c6d633a92
|
555 |
crypto_unregister_alg(&alg->halg.base); |
01c2dece4
|
556 557 |
} EXPORT_SYMBOL_GPL(crypto_unregister_ahash); |
6f7473c52
|
558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 |
int crypto_register_ahashes(struct ahash_alg *algs, int count) { int i, ret; for (i = 0; i < count; i++) { ret = crypto_register_ahash(&algs[i]); if (ret) goto err; } return 0; err: for (--i; i >= 0; --i) crypto_unregister_ahash(&algs[i]); return ret; } EXPORT_SYMBOL_GPL(crypto_register_ahashes); void crypto_unregister_ahashes(struct ahash_alg *algs, int count) { int i; for (i = count - 1; i >= 0; --i) crypto_unregister_ahash(&algs[i]); } EXPORT_SYMBOL_GPL(crypto_unregister_ahashes); |
01c2dece4
|
586 587 588 589 |
int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst) { int err; |
d4fdc2dfa
|
590 591 |
if (WARN_ON(!inst->free)) return -EINVAL; |
01c2dece4
|
592 593 594 595 596 597 598 |
err = ahash_prepare_alg(&inst->alg); if (err) return err; return crypto_register_instance(tmpl, ahash_crypto_instance(inst)); } EXPORT_SYMBOL_GPL(ahash_register_instance); |
cd6ed77ad
|
599 600 601 602 603 604 605 606 607 608 |
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; if (alg->cra_type != &crypto_ahash_type) return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); return __crypto_ahash_alg(alg)->setkey != NULL; } EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); |
004a403c2
|
609 610 |
MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); |