Commit 750052dd2400cd09e0864d75b63c2c0bf605056f

Authored by Uri Simchoni
Committed by Herbert Xu
1 parent 0c5c6c4bae

crypto: mv_cesa - Add sha1 and hmac(sha1) async hash drivers

Add sha1 and hmac(sha1) async hash drivers, backed by the CESA hardware engine with a software shash fallback for requests the hardware cannot finish on its own (e.g. messages longer than 0xFFFF bytes).

Signed-off-by: Uri Simchoni <uri@jdland.co.il>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Showing 2 changed files with 542 additions and 12 deletions

drivers/crypto/mv_cesa.c
... ... @@ -14,8 +14,14 @@
14 14 #include <linux/kthread.h>
15 15 #include <linux/platform_device.h>
16 16 #include <linux/scatterlist.h>
  17 +#include <crypto/internal/hash.h>
  18 +#include <crypto/sha.h>
17 19  
18 20 #include "mv_cesa.h"
  21 +
  22 +#define MV_CESA "MV-CESA:"
  23 +#define MAX_HW_HASH_SIZE 0xFFFF
  24 +
19 25 /*
20 26 * STM:
21 27 * /---------------------------------------\
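
For context, a short note (not part of the patch) on where the 0xFFFF limit above comes from:

/*
 * MAX_HW_HASH_SIZE mirrors the 16-bit total-length field of the
 * accelerator descriptor: MAC_SRC_TOTAL_LEN(x) in mv_cesa.h shifts the
 * message length into bits 16..31 of mac_src_p, so the engine can only
 * finalize a hash of up to 0xFFFF = 65535 bytes.  Longer messages have
 * their final round computed by the software fallback instead.
 */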
... ... @@ -38,7 +44,7 @@
38 44 * @dst_sg_it: sg iterator for dst
39 45 * @sg_src_left: bytes left in src to process (scatter list)
40 46 * @src_start: offset to add to src start position (scatter list)
41   - * @crypt_len: length of current crypt process
  47 + * @crypt_len: length of current hw crypt/hash process
42 48 * @hw_nbytes: total bytes to process in hw for this request
43 49 * @copy_back: whether to copy data back (crypt) or not (hash)
44 50 * @sg_dst_left: bytes left dst to process in this scatter list
... ... @@ -81,6 +87,8 @@
81 87 struct req_progress p;
82 88 int max_req_size;
83 89 int sram_size;
  90 + int has_sha1;
  91 + int has_hmac_sha1;
84 92 };
85 93  
86 94 static struct crypto_priv *cpg;
... ... @@ -102,6 +110,31 @@
102 110 int decrypt;
103 111 };
104 112  
  113 +enum hash_op {
  114 + COP_SHA1,
  115 + COP_HMAC_SHA1
  116 +};
  117 +
  118 +struct mv_tfm_hash_ctx {
  119 + struct crypto_shash *fallback;
  120 + struct crypto_shash *base_hash;
  121 + u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
  122 + int count_add;
  123 + enum hash_op op;
  124 +};
  125 +
  126 +struct mv_req_hash_ctx {
  127 + u64 count;
  128 + u32 state[SHA1_DIGEST_SIZE / 4];
  129 + u8 buffer[SHA1_BLOCK_SIZE];
  130 + int first_hash; /* marks that we don't have previous state */
  131 + int last_chunk; /* marks that this is the 'final' request */
  132 + int extra_bytes; /* unprocessed bytes in buffer */
  133 + enum hash_op op;
  134 + int count_add;
  135 + struct scatterlist dummysg;
  136 +};
  137 +
105 138 static void compute_aes_dec_key(struct mv_ctx *ctx)
106 139 {
107 140 struct crypto_aes_ctx gen_aes_key;
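
A brief note (not part of the patch) on how the two new contexts above split the work:

/*
 * mv_tfm_hash_ctx is per-transform (per-key) state: the selected
 * operation, the fallback shash handles and, for hmac(sha1), the
 * precomputed inner/outer IVs (2 * 5 u32 words).  mv_req_hash_ctx is
 * per-request streaming state: the running byte count, the five SHA1
 * state words read back from the engine between chunks, and up to one
 * 64-byte block of unprocessed tail data in 'buffer'.
 */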
... ... @@ -265,6 +298,132 @@
265 298 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
266 299 }
267 300  
  301 +static void mv_process_hash_current(int first_block)
  302 +{
  303 + struct ahash_request *req = ahash_request_cast(cpg->cur_req);
  304 + struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
  305 + struct req_progress *p = &cpg->p;
  306 + struct sec_accel_config op = { 0 };
  307 + int is_last;
  308 +
  309 + switch (req_ctx->op) {
  310 + case COP_SHA1:
  311 + default:
  312 + op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
  313 + break;
  314 + case COP_HMAC_SHA1:
  315 + op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
  316 + break;
  317 + }
  318 +
  319 + op.mac_src_p =
  320 + MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
  321 + req_ctx->
  322 + count);
  323 +
  324 + setup_data_in();
  325 +
  326 + op.mac_digest =
  327 + MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
  328 + op.mac_iv =
  329 + MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
  330 + MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
  331 +
  332 + is_last = req_ctx->last_chunk
  333 + && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
  334 + && (req_ctx->count <= MAX_HW_HASH_SIZE);
  335 + if (req_ctx->first_hash) {
  336 + if (is_last)
  337 + op.config |= CFG_NOT_FRAG;
  338 + else
  339 + op.config |= CFG_FIRST_FRAG;
  340 +
  341 + req_ctx->first_hash = 0;
  342 + } else {
  343 + if (is_last)
  344 + op.config |= CFG_LAST_FRAG;
  345 + else
  346 + op.config |= CFG_MID_FRAG;
  347 + }
  348 +
  349 + memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
  350 +
  351 + writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
  352 + /* GO */
  353 + writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
  354 +
  355 + /*
  356 + * XXX: add timer if the interrupt does not occur for some mystery
  357 + * reason
  358 + */
  359 +}
  360 +
  361 +static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
  362 + struct shash_desc *desc)
  363 +{
  364 + int i;
  365 + struct sha1_state shash_state;
  366 +
  367 + shash_state.count = ctx->count + ctx->count_add;
  368 + for (i = 0; i < 5; i++)
  369 + shash_state.state[i] = ctx->state[i];
  370 + memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
  371 + return crypto_shash_import(desc, &shash_state);
  372 +}
  373 +
  374 +static int mv_hash_final_fallback(struct ahash_request *req)
  375 +{
  376 + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
  377 + struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
  378 + struct {
  379 + struct shash_desc shash;
  380 + char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
  381 + } desc;
  382 + int rc;
  383 +
  384 + desc.shash.tfm = tfm_ctx->fallback;
  385 + desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
  386 + if (unlikely(req_ctx->first_hash)) {
  387 + crypto_shash_init(&desc.shash);
  388 + crypto_shash_update(&desc.shash, req_ctx->buffer,
  389 + req_ctx->extra_bytes);
  390 + } else {
  391 + /* only SHA1 for now....
  392 + */
  393 + rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
  394 + if (rc)
  395 + goto out;
  396 + }
  397 + rc = crypto_shash_final(&desc.shash, req->result);
  398 +out:
  399 + return rc;
  400 +}
  401 +
  402 +static void mv_hash_algo_completion(void)
  403 +{
  404 + struct ahash_request *req = ahash_request_cast(cpg->cur_req);
  405 + struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
  406 +
  407 + if (ctx->extra_bytes)
  408 + copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
  409 + sg_miter_stop(&cpg->p.src_sg_it);
  410 +
  411 + ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
  412 + ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
  413 + ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
  414 + ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
  415 + ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
  416 +
  417 + if (likely(ctx->last_chunk)) {
  418 + if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
  419 + memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
  420 + crypto_ahash_digestsize(crypto_ahash_reqtfm
  421 + (req)));
  422 + } else
  423 + mv_hash_final_fallback(req);
  424 + }
  425 +}
  426 +
268 427 static void dequeue_complete_req(void)
269 428 {
270 429 struct crypto_async_request *req = cpg->cur_req;
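
For clarity, the fragment-mode selection in mv_process_hash_current() above can be restated as a small standalone helper; this is an editorial sketch (the helper name is made up), not driver code:

/*
 * 'is_last' in the driver additionally requires that the whole message
 * fits the hardware limit (req_ctx->count <= MAX_HW_HASH_SIZE); if not,
 * the final round is left to the software fallback.
 */
static u32 hash_frag_mode(int first_hash, int is_last)
{
	if (first_hash)
		return is_last ? CFG_NOT_FRAG : CFG_FIRST_FRAG;

	return is_last ? CFG_LAST_FRAG : CFG_MID_FRAG;
}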
... ... @@ -332,7 +491,7 @@
332 491 return i;
333 492 }
334 493  
335   -static void mv_enqueue_new_req(struct ablkcipher_request *req)
  494 +static void mv_start_new_crypt_req(struct ablkcipher_request *req)
336 495 {
337 496 struct req_progress *p = &cpg->p;
338 497 int num_sgs;
339 498  
... ... @@ -353,11 +512,68 @@
353 512 mv_process_current_q(1);
354 513 }
355 514  
  515 +static void mv_start_new_hash_req(struct ahash_request *req)
  516 +{
  517 + struct req_progress *p = &cpg->p;
  518 + struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
  519 + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
  520 + int num_sgs, hw_bytes, old_extra_bytes, rc;
  521 + cpg->cur_req = &req->base;
  522 + memset(p, 0, sizeof(struct req_progress));
  523 + hw_bytes = req->nbytes + ctx->extra_bytes;
  524 + old_extra_bytes = ctx->extra_bytes;
  525 +
  526 + if (unlikely(ctx->extra_bytes)) {
  527 + memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
  528 + ctx->extra_bytes);
  529 + p->crypt_len = ctx->extra_bytes;
  530 + }
  531 +
  532 + memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
  533 +
  534 + if (unlikely(!ctx->first_hash)) {
  535 + writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
  536 + writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
  537 + writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
  538 + writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
  539 + writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
  540 + }
  541 +
  542 + ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
  543 + if (ctx->extra_bytes != 0
  544 + && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
  545 + hw_bytes -= ctx->extra_bytes;
  546 + else
  547 + ctx->extra_bytes = 0;
  548 +
  549 + num_sgs = count_sgs(req->src, req->nbytes);
  550 + sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
  551 +
  552 + if (hw_bytes) {
  553 + p->hw_nbytes = hw_bytes;
  554 + p->complete = mv_hash_algo_completion;
  555 + p->process = mv_process_hash_current;
  556 +
  557 + mv_process_hash_current(1);
  558 + } else {
  559 + copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
  560 + ctx->extra_bytes - old_extra_bytes);
  561 + sg_miter_stop(&p->src_sg_it);
  562 + if (ctx->last_chunk)
  563 + rc = mv_hash_final_fallback(req);
  564 + else
  565 + rc = 0;
  566 + cpg->eng_st = ENGINE_IDLE;
  567 + local_bh_disable();
  568 + req->base.complete(&req->base, rc);
  569 + local_bh_enable();
  570 + }
  571 +}
  572 +
356 573 static int queue_manag(void *data)
357 574 {
358 575 cpg->eng_st = ENGINE_IDLE;
359 576 do {
360   - struct ablkcipher_request *req;
361 577 struct crypto_async_request *async_req = NULL;
362 578 struct crypto_async_request *backlog;
363 579  
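
To make the buffering in mv_start_new_hash_req() above concrete, here is a worked example (editorial, not driver code) of the hw_bytes/extra_bytes split:

/*
 * Unless this is a final chunk the engine can finish by itself, only
 * whole SHA1 blocks are fed to the hardware; the tail stays in
 * ctx->buffer for the next request.
 *
 *   previous leftover:  10 bytes   (old_extra_bytes, copied into SRAM)
 *   new request:       150 bytes   (req->nbytes)
 *   hw_bytes    = 150 + 10 = 160
 *   extra_bytes = 160 % 64 =  32   (held back: not the last chunk)
 *   hw_bytes   -=  32      = 128   (two SHA1 blocks go to the engine)
 *
 * If nothing is left for the hardware (hw_bytes == 0), the request is
 * completed immediately, via the shash fallback when it was the final
 * chunk.
 */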
... ... @@ -383,9 +599,18 @@
383 599 }
384 600  
385 601 if (async_req) {
386   - req = container_of(async_req,
387   - struct ablkcipher_request, base);
388   - mv_enqueue_new_req(req);
  602 + if (async_req->tfm->__crt_alg->cra_type !=
  603 + &crypto_ahash_type) {
  604 + struct ablkcipher_request *req =
  605 + container_of(async_req,
  606 + struct ablkcipher_request,
  607 + base);
  608 + mv_start_new_crypt_req(req);
  609 + } else {
  610 + struct ahash_request *req =
  611 + ahash_request_cast(async_req);
  612 + mv_start_new_hash_req(req);
  613 + }
389 614 async_req = NULL;
390 615 }
391 616  
... ... @@ -457,6 +682,215 @@
457 682 return 0;
458 683 }
459 684  
  685 +static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
  686 + int is_last, unsigned int req_len,
  687 + int count_add)
  688 +{
  689 + memset(ctx, 0, sizeof(*ctx));
  690 + ctx->op = op;
  691 + ctx->count = req_len;
  692 + ctx->first_hash = 1;
  693 + ctx->last_chunk = is_last;
  694 + ctx->count_add = count_add;
  695 +}
  696 +
  697 +static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
  698 + unsigned req_len)
  699 +{
  700 + ctx->last_chunk = is_last;
  701 + ctx->count += req_len;
  702 +}
  703 +
  704 +static int mv_hash_init(struct ahash_request *req)
  705 +{
  706 + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
  707 + mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
  708 + tfm_ctx->count_add);
  709 + return 0;
  710 +}
  711 +
  712 +static int mv_hash_update(struct ahash_request *req)
  713 +{
  714 + if (!req->nbytes)
  715 + return 0;
  716 +
  717 + mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
  718 + return mv_handle_req(&req->base);
  719 +}
  720 +
  721 +static int mv_hash_final(struct ahash_request *req)
  722 +{
  723 + struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
  724 + /* dummy buffer of 4 bytes */
  725 + sg_init_one(&ctx->dummysg, ctx->buffer, 4);
  726 + /* I think I'm allowed to do that... */
  727 + ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
  728 + mv_update_hash_req_ctx(ctx, 1, 0);
  729 + return mv_handle_req(&req->base);
  730 +}
  731 +
  732 +static int mv_hash_finup(struct ahash_request *req)
  733 +{
  734 + if (!req->nbytes)
  735 + return mv_hash_final(req);
  736 +
  737 + mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
  738 + return mv_handle_req(&req->base);
  739 +}
  740 +
  741 +static int mv_hash_digest(struct ahash_request *req)
  742 +{
  743 + const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
  744 + mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
  745 + req->nbytes, tfm_ctx->count_add);
  746 + return mv_handle_req(&req->base);
  747 +}
  748 +
  749 +static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
  750 + const void *ostate)
  751 +{
  752 + const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
  753 + int i;
  754 + for (i = 0; i < 5; i++) {
  755 + ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
  756 + ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
  757 + }
  758 +}
  759 +
  760 +static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
  761 + unsigned int keylen)
  762 +{
  763 + int rc;
  764 + struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
  765 + int bs, ds, ss;
  766 +
  767 + if (!ctx->base_hash)
  768 + return 0;
  769 +
  770 + rc = crypto_shash_setkey(ctx->fallback, key, keylen);
  771 + if (rc)
  772 + return rc;
  773 +
  774 + /* Can't see a way to extract the ipad/opad from the fallback tfm
  775 + so I'm basically copying code from the hmac module */
  776 + bs = crypto_shash_blocksize(ctx->base_hash);
  777 + ds = crypto_shash_digestsize(ctx->base_hash);
  778 + ss = crypto_shash_statesize(ctx->base_hash);
  779 +
  780 + {
  781 + struct {
  782 + struct shash_desc shash;
  783 + char ctx[crypto_shash_descsize(ctx->base_hash)];
  784 + } desc;
  785 + unsigned int i;
  786 + char ipad[ss];
  787 + char opad[ss];
  788 +
  789 + desc.shash.tfm = ctx->base_hash;
  790 + desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
  791 + CRYPTO_TFM_REQ_MAY_SLEEP;
  792 +
  793 + if (keylen > bs) {
  794 + int err;
  795 +
  796 + err =
  797 + crypto_shash_digest(&desc.shash, key, keylen, ipad);
  798 + if (err)
  799 + return err;
  800 +
  801 + keylen = ds;
  802 + } else
  803 + memcpy(ipad, key, keylen);
  804 +
  805 + memset(ipad + keylen, 0, bs - keylen);
  806 + memcpy(opad, ipad, bs);
  807 +
  808 + for (i = 0; i < bs; i++) {
  809 + ipad[i] ^= 0x36;
  810 + opad[i] ^= 0x5c;
  811 + }
  812 +
  813 + rc = crypto_shash_init(&desc.shash) ? :
  814 + crypto_shash_update(&desc.shash, ipad, bs) ? :
  815 + crypto_shash_export(&desc.shash, ipad) ? :
  816 + crypto_shash_init(&desc.shash) ? :
  817 + crypto_shash_update(&desc.shash, opad, bs) ? :
  818 + crypto_shash_export(&desc.shash, opad);
  819 +
  820 + if (rc == 0)
  821 + mv_hash_init_ivs(ctx, ipad, opad);
  822 +
  823 + return rc;
  824 + }
  825 +}
  826 +
  827 +static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
  828 + enum hash_op op, int count_add)
  829 +{
  830 + const char *fallback_driver_name = tfm->__crt_alg->cra_name;
  831 + struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
  832 + struct crypto_shash *fallback_tfm = NULL;
  833 + struct crypto_shash *base_hash = NULL;
  834 + int err = -ENOMEM;
  835 +
  836 + ctx->op = op;
  837 + ctx->count_add = count_add;
  838 +
  839 + /* Allocate a fallback and abort if it failed. */
  840 + fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
  841 + CRYPTO_ALG_NEED_FALLBACK);
  842 + if (IS_ERR(fallback_tfm)) {
  843 + printk(KERN_WARNING MV_CESA
  844 + "Fallback driver '%s' could not be loaded!\n",
  845 + fallback_driver_name);
  846 + err = PTR_ERR(fallback_tfm);
  847 + goto out;
  848 + }
  849 + ctx->fallback = fallback_tfm;
  850 +
  851 + if (base_hash_name) {
  852 + /* Allocate a hash to compute the ipad/opad of hmac. */
  853 + base_hash = crypto_alloc_shash(base_hash_name, 0,
  854 + CRYPTO_ALG_NEED_FALLBACK);
  855 + if (IS_ERR(base_hash)) {
  856 + printk(KERN_WARNING MV_CESA
  857 + "Base driver '%s' could not be loaded!\n",
  858 + base_hash_name);
  859 + err = PTR_ERR(base_hash);
  860 + goto err_bad_base;
  861 + }
  862 + }
  863 + ctx->base_hash = base_hash;
  864 +
  865 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  866 + sizeof(struct mv_req_hash_ctx) +
  867 + crypto_shash_descsize(ctx->fallback));
  868 + return 0;
  869 +err_bad_base:
  870 + crypto_free_shash(fallback_tfm);
  871 +out:
  872 + return err;
  873 +}
  874 +
  875 +static void mv_cra_hash_exit(struct crypto_tfm *tfm)
  876 +{
  877 + struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
  878 +
  879 + crypto_free_shash(ctx->fallback);
  880 + if (ctx->base_hash)
  881 + crypto_free_shash(ctx->base_hash);
  882 +}
  883 +
  884 +static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
  885 +{
  886 + return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
  887 +}
  888 +
  889 +static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
  890 +{
  891 + return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
  892 +}
  893 +
460 894 irqreturn_t crypto_int(int irq, void *priv)
461 895 {
462 896 u32 val;
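
As background for mv_hash_setkey() above, a short summary (editorial) of the standard HMAC construction it precomputes:

/*
 * HMAC (RFC 2104):  HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * The setkey path hashes one block of (K ^ ipad) and one block of
 * (K ^ opad) with the plain sha1 shash, exports the two partial states,
 * and mv_hash_init_ivs() stores them as big-endian words in ctx->ivs.
 * At request time they are copied to SRAM_HMAC_IV_IN / SRAM_HMAC_IV_OUT
 * so the engine can resume the inner and outer hashes without ever
 * seeing the key.  The count_add of SHA1_BLOCK_SIZE accounts for that
 * already-hashed key block when the software fallback takes over.
 */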
... ... @@ -519,6 +953,53 @@
519 953 },
520 954 };
521 955  
  956 +struct ahash_alg mv_sha1_alg = {
  957 + .init = mv_hash_init,
  958 + .update = mv_hash_update,
  959 + .final = mv_hash_final,
  960 + .finup = mv_hash_finup,
  961 + .digest = mv_hash_digest,
  962 + .halg = {
  963 + .digestsize = SHA1_DIGEST_SIZE,
  964 + .base = {
  965 + .cra_name = "sha1",
  966 + .cra_driver_name = "mv-sha1",
  967 + .cra_priority = 300,
  968 + .cra_flags =
  969 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
  970 + .cra_blocksize = SHA1_BLOCK_SIZE,
  971 + .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
  972 + .cra_init = mv_cra_hash_sha1_init,
  973 + .cra_exit = mv_cra_hash_exit,
  974 + .cra_module = THIS_MODULE,
  975 + }
  976 + }
  977 +};
  978 +
  979 +struct ahash_alg mv_hmac_sha1_alg = {
  980 + .init = mv_hash_init,
  981 + .update = mv_hash_update,
  982 + .final = mv_hash_final,
  983 + .finup = mv_hash_finup,
  984 + .digest = mv_hash_digest,
  985 + .setkey = mv_hash_setkey,
  986 + .halg = {
  987 + .digestsize = SHA1_DIGEST_SIZE,
  988 + .base = {
  989 + .cra_name = "hmac(sha1)",
  990 + .cra_driver_name = "mv-hmac-sha1",
  991 + .cra_priority = 300,
  992 + .cra_flags =
  993 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
  994 + .cra_blocksize = SHA1_BLOCK_SIZE,
  995 + .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
  996 + .cra_init = mv_cra_hash_hmac_sha1_init,
  997 + .cra_exit = mv_cra_hash_exit,
  998 + .cra_module = THIS_MODULE,
  999 + }
  1000 + }
  1001 +};
  1002 +
522 1003 static int mv_probe(struct platform_device *pdev)
523 1004 {
524 1005 struct crypto_priv *cp;
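
For reference, a hedged usage sketch (not part of the patch) of how another kernel module might drive the hmac(sha1) ahash registered above; example_hmac_sha1() and example_done() are made-up names, and the -EINPROGRESS/-EBUSY handling is reduced to a bare completion:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

static void example_done(struct crypto_async_request *areq, int err)
{
	if (err != -EINPROGRESS)
		complete(areq->data);
}

static int example_hmac_sha1(const u8 *key, unsigned int keylen,
			     const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	/* "hmac(sha1)" resolves to mv-hmac-sha1 when its priority wins. */
	tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);	/* 'data' must be linearly mapped */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &done);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;	/* real code would carry err from the callback */
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}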
... ... @@ -527,7 +1008,7 @@
527 1008 int ret;
528 1009  
529 1010 if (cpg) {
530   - printk(KERN_ERR "Second crypto dev?\n");
  1011 + printk(KERN_ERR MV_CESA "Second crypto dev?\n");
531 1012 return -EEXIST;
532 1013 }
533 1014  
... ... @@ -591,6 +1072,21 @@
591 1072 ret = crypto_register_alg(&mv_aes_alg_cbc);
592 1073 if (ret)
593 1074 goto err_unreg_ecb;
  1075 +
  1076 + ret = crypto_register_ahash(&mv_sha1_alg);
  1077 + if (ret == 0)
  1078 + cpg->has_sha1 = 1;
  1079 + else
  1080 + printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
  1081 +
  1082 + ret = crypto_register_ahash(&mv_hmac_sha1_alg);
  1083 + if (ret == 0) {
  1084 + cpg->has_hmac_sha1 = 1;
  1085 + } else {
  1086 + printk(KERN_WARNING MV_CESA
  1087 + "Could not register hmac-sha1 driver\n");
  1088 + }
  1089 +
594 1090 return 0;
595 1091 err_unreg_ecb:
596 1092 crypto_unregister_alg(&mv_aes_alg_ecb);
... ... @@ -615,6 +1111,10 @@
615 1111  
616 1112 crypto_unregister_alg(&mv_aes_alg_ecb);
617 1113 crypto_unregister_alg(&mv_aes_alg_cbc);
  1114 + if (cp->has_sha1)
  1115 + crypto_unregister_ahash(&mv_sha1_alg);
  1116 + if (cp->has_hmac_sha1)
  1117 + crypto_unregister_ahash(&mv_hmac_sha1_alg);
618 1118 kthread_stop(cp->queue_th);
619 1119 free_irq(cp->irq, cp);
620 1120 memset(cp->sram, 0, cp->sram_size);
drivers/crypto/mv_cesa.h
1 1 #ifndef __MV_CRYPTO_H__
2 2  
3 3 #define DIGEST_INITIAL_VAL_A 0xdd00
  4 +#define DIGEST_INITIAL_VAL_B 0xdd04
  5 +#define DIGEST_INITIAL_VAL_C 0xdd08
  6 +#define DIGEST_INITIAL_VAL_D 0xdd0c
  7 +#define DIGEST_INITIAL_VAL_E 0xdd10
4 8 #define DES_CMD_REG 0xdd58
5 9  
6 10 #define SEC_ACCEL_CMD 0xde00
... ... @@ -70,6 +74,10 @@
70 74 #define CFG_AES_LEN_128 (0 << 24)
71 75 #define CFG_AES_LEN_192 (1 << 24)
72 76 #define CFG_AES_LEN_256 (2 << 24)
  77 +#define CFG_NOT_FRAG (0 << 30)
  78 +#define CFG_FIRST_FRAG (1 << 30)
  79 +#define CFG_LAST_FRAG (2 << 30)
  80 +#define CFG_MID_FRAG (3 << 30)
73 81  
74 82 u32 enc_p;
75 83 #define ENC_P_SRC(x) (x)
76 84  
... ... @@ -90,7 +98,11 @@
90 98 #define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
91 99  
92 100 u32 mac_digest;
  101 +#define MAC_DIGEST_P(x) (x)
  102 +#define MAC_FRAG_LEN(x) ((x) << 16)
93 103 u32 mac_iv;
  104 +#define MAC_INNER_IV_P(x) (x)
  105 +#define MAC_OUTER_IV_P(x) ((x) << 16)
94 106 }__attribute__ ((packed));
95 107 /*
96 108 * /-----------\ 0
97 109  
98 110  
99 111  
100 112  
... ... @@ -101,20 +113,38 @@
101 113 * | IV IN | 4 * 4
102 114 * |-----------| 0x40 (inplace)
103 115 * | IV BUF | 4 * 4
104   - * |-----------| 0x50
  116 + * |-----------| 0x80
105 117 * | DATA IN | 16 * x (max ->max_req_size)
106   - * |-----------| 0x50 (inplace operation)
  118 + * |-----------| 0x80 (inplace operation)
107 119 * | DATA OUT | 16 * x (max ->max_req_size)
108 120 * \-----------/ SRAM size
109 121 */
  122 +
  123 + /* Hashing memory map:
  124 + * /-----------\ 0
  125 + * | ACCEL CFG | 4 * 8
  126 + * |-----------| 0x20
  127 + * | Inner IV | 5 * 4
  128 + * |-----------| 0x34
  129 + * | Outer IV | 5 * 4
  130 + * |-----------| 0x48
  131 + * | Output BUF| 5 * 4
  132 + * |-----------| 0x80
  133 + * | DATA IN | 64 * x (max ->max_req_size)
  134 + * \-----------/ SRAM size
  135 + */
110 136 #define SRAM_CONFIG 0x00
111 137 #define SRAM_DATA_KEY_P 0x20
112 138 #define SRAM_DATA_IV 0x40
113 139 #define SRAM_DATA_IV_BUF 0x40
114   -#define SRAM_DATA_IN_START 0x50
115   -#define SRAM_DATA_OUT_START 0x50
  140 +#define SRAM_DATA_IN_START 0x80
  141 +#define SRAM_DATA_OUT_START 0x80
116 142  
117   -#define SRAM_CFG_SPACE 0x50
  143 +#define SRAM_HMAC_IV_IN 0x20
  144 +#define SRAM_HMAC_IV_OUT 0x34
  145 +#define SRAM_DIGEST_BUF 0x48
  146 +
  147 +#define SRAM_CFG_SPACE 0x80
118 148  
119 149 #endif
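
To close, a cross-check (editorial) of the new hashing memory map against the SRAM defines above:

/*
 *   SRAM_CONFIG       0x00  + 8 * 4 (sec_accel_config)  = 0x20
 *   SRAM_HMAC_IV_IN   0x20  + 5 * 4 (inner SHA1 state)  = 0x34
 *   SRAM_HMAC_IV_OUT  0x34  + 5 * 4 (outer SHA1 state)  = 0x48
 *   SRAM_DIGEST_BUF   0x48  + 5 * 4 (result)            = 0x5c
 *   SRAM_DATA_IN_START / SRAM_CFG_SPACE                 = 0x80
 *
 * The data area therefore moves from 0x50 to 0x80, leaving 0x5c..0x7f
 * unused, which is why the crypto memory map above also changes its
 * DATA IN / DATA OUT offsets from 0x50 to 0x80.
 */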