Commit 798eed5d9204b01862985ba0643ce5cf84114072

Authored by Dmitry Kasatkin
Committed by Herbert Xu
1 parent a5d87237bb

crypto: omap-sham - crypto_ahash_final() no longer needs to be called.

According to Herbert Xu, the client may not always call
crypto_ahash_final().

In the case of an error in the hash calculation, resources will be
automatically cleaned up.

But if no hash calculation error happens and the client does not call
crypto_ahash_final() at all, then the internal buffer will not be freed
and the clocks will not be disabled.

This patch provides support for atomic crypto_ahash_update() calls.
Clocks are now enabled and disabled per update request.

The data buffer is now allocated as part of the request context.
The client is obligated to free it with crypto_free_ahash().

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Showing 1 changed file with 82 additions and 86 deletions Side-by-side Diff

drivers/crypto/omap-sham.c
... ... @@ -89,6 +89,11 @@
89 89 #define OP_UPDATE 1
90 90 #define OP_FINAL 2
91 91  
  92 +#define OMAP_ALIGN_MASK (sizeof(u32)-1)
  93 +#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
  94 +
  95 +#define BUFLEN PAGE_SIZE
  96 +
92 97 struct omap_sham_dev;
93 98  
94 99 struct omap_sham_reqctx {
95 100  
... ... @@ -96,9 +101,8 @@
96 101 unsigned long flags;
97 102 unsigned long op;
98 103  
99   - u8 digest[SHA1_DIGEST_SIZE];
  104 + u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
100 105 size_t digcnt;
101   - u8 *buffer;
102 106 size_t bufcnt;
103 107 size_t buflen;
104 108 dma_addr_t dma_addr;
... ... @@ -107,6 +111,8 @@
107 111 struct scatterlist *sg;
108 112 unsigned int offset; /* offset in current sg */
109 113 unsigned int total; /* total request */
  114 +
  115 + u8 buffer[0] OMAP_ALIGNED;
110 116 };
111 117  
112 118 struct omap_sham_hmac_ctx {
113 119  
114 120  
115 121  
116 122  
117 123  
118 124  
... ... @@ -219,31 +225,33 @@
219 225 }
220 226 }
221 227  
222   -static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
223   - int final, int dma)
  228 +static int omap_sham_hw_init(struct omap_sham_dev *dd)
224 229 {
225   - struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
226   - u32 val = length << 5, mask;
  230 + clk_enable(dd->iclk);
227 231  
228   - if (unlikely(!ctx->digcnt)) {
  232 + if (!(dd->flags & FLAGS_INIT)) {
  233 + omap_sham_write_mask(dd, SHA_REG_MASK,
  234 + SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
229 235  
230   - clk_enable(dd->iclk);
  236 + if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
  237 + SHA_REG_SYSSTATUS_RESETDONE))
  238 + return -ETIMEDOUT;
231 239  
232   - if (!(dd->flags & FLAGS_INIT)) {
233   - omap_sham_write_mask(dd, SHA_REG_MASK,
234   - SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
  240 + dd->flags |= FLAGS_INIT;
  241 + dd->err = 0;
  242 + }
235 243  
236   - if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
237   - SHA_REG_SYSSTATUS_RESETDONE)) {
238   - clk_disable(dd->iclk);
239   - return -ETIMEDOUT;
240   - }
241   - dd->flags |= FLAGS_INIT;
242   - dd->err = 0;
243   - }
244   - } else {
  244 + return 0;
  245 +}
  246 +
  247 +static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
  248 + int final, int dma)
  249 +{
  250 + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
  251 + u32 val = length << 5, mask;
  252 +
  253 + if (likely(ctx->digcnt))
245 254 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
246   - }
247 255  
248 256 omap_sham_write_mask(dd, SHA_REG_MASK,
249 257 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
250 258  
251 259  
... ... @@ -263,23 +271,19 @@
263 271 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
264 272  
265 273 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
266   -
267   - return 0;
268 274 }
269 275  
270 276 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
271 277 size_t length, int final)
272 278 {
273 279 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
274   - int err, count, len32;
  280 + int count, len32;
275 281 const u32 *buffer = (const u32 *)buf;
276 282  
277 283 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
278 284 ctx->digcnt, length, final);
279 285  
280   - err = omap_sham_write_ctrl(dd, length, final, 0);
281   - if (err)
282   - return err;
  286 + omap_sham_write_ctrl(dd, length, final, 0);
283 287  
284 288 /* should be non-zero before next lines to disable clocks later */
285 289 ctx->digcnt += length;
286 290  
... ... @@ -302,14 +306,10 @@
302 306 size_t length, int final)
303 307 {
304 308 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
305   - int err, len32;
  309 + int len32;
306 310  
307 311 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
308 312 ctx->digcnt, length, final);
309   - /* flush cache entries related to our page */
310   - if (dma_addr == ctx->dma_addr)
311   - dma_sync_single_for_device(dd->dev, dma_addr, length,
312   - DMA_TO_DEVICE);
313 313  
314 314 len32 = DIV_ROUND_UP(length, sizeof(u32));
315 315  
316 316  
... ... @@ -320,20 +320,8 @@
320 320 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
321 321 dma_addr, 0, 0);
322 322  
323   - omap_set_dma_dest_params(dd->dma_lch, 0,
324   - OMAP_DMA_AMODE_CONSTANT,
325   - dd->phys_base + SHA_REG_DIN(0), 0, 16);
  323 + omap_sham_write_ctrl(dd, length, final, 1);
326 324  
327   - omap_set_dma_dest_burst_mode(dd->dma_lch,
328   - OMAP_DMA_DATA_BURST_16);
329   -
330   - omap_set_dma_src_burst_mode(dd->dma_lch,
331   - OMAP_DMA_DATA_BURST_4);
332   -
333   - err = omap_sham_write_ctrl(dd, length, final, 1);
334   - if (err)
335   - return err;
336   -
337 325 ctx->digcnt += length;
338 326  
339 327 if (final)
... ... @@ -384,6 +372,21 @@
384 372 return 0;
385 373 }
386 374  
  375 +static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
  376 + struct omap_sham_reqctx *ctx,
  377 + size_t length, int final)
  378 +{
  379 + ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
  380 + DMA_TO_DEVICE);
  381 + if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
  382 + dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
  383 + return -EINVAL;
  384 + }
  385 +
  386 + /* next call does not fail... so no unmap in the case of error */
  387 + return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
  388 +}
  389 +
387 390 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
388 391 {
389 392 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
... ... @@ -403,7 +406,7 @@
403 406 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
404 407 count = ctx->bufcnt;
405 408 ctx->bufcnt = 0;
406   - return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
  409 + return omap_sham_xmit_dma_map(dd, ctx, count, final);
407 410 }
408 411  
409 412 return 0;
... ... @@ -413,7 +416,6 @@
413 416 {
414 417 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
415 418 unsigned int length;
416   - int err;
417 419  
418 420 ctx->flags |= FLAGS_FAST;
419 421  
... ... @@ -427,11 +429,8 @@
427 429  
428 430 ctx->total -= length;
429 431  
430   - err = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
431   - if (err != -EINPROGRESS)
432   - dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
433   -
434   - return err;
  432 + /* next call does not fail... so no unmap in the case of error */
  433 + return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
435 434 }
436 435  
437 436 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
... ... @@ -453,6 +452,9 @@
453 452 omap_stop_dma(dd->dma_lch);
454 453 if (ctx->flags & FLAGS_FAST)
455 454 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
  455 + else
  456 + dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
  457 + DMA_TO_DEVICE);
456 458  
457 459 return 0;
458 460 }
459 461  
460 462  
... ... @@ -471,19 +473,10 @@
471 473 ctx->flags |= FLAGS_CLEAN;
472 474 spin_unlock_irqrestore(&dd->lock, flags);
473 475  
474   - if (ctx->digcnt) {
475   - clk_disable(dd->iclk);
  476 + if (ctx->digcnt)
476 477 memcpy(req->result, ctx->digest, (ctx->flags & FLAGS_SHA1) ?
477 478 SHA1_DIGEST_SIZE : MD5_DIGEST_SIZE);
478   - }
479 479  
480   - if (ctx->dma_addr)
481   - dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
482   - DMA_TO_DEVICE);
483   -
484   - if (ctx->buffer)
485   - free_page((unsigned long)ctx->buffer);
486   -
487 480 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
488 481 }
489 482  
490 483  
... ... @@ -520,22 +513,8 @@
520 513  
521 514 ctx->bufcnt = 0;
522 515 ctx->digcnt = 0;
  516 + ctx->buflen = BUFLEN;
523 517  
524   - ctx->buflen = PAGE_SIZE;
525   - ctx->buffer = (void *)__get_free_page(
526   - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
527   - GFP_KERNEL : GFP_ATOMIC);
528   - if (!ctx->buffer)
529   - return -ENOMEM;
530   -
531   - ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
532   - DMA_TO_DEVICE);
533   - if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
534   - dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
535   - free_page((unsigned long)ctx->buffer);
536   - return -EINVAL;
537   - }
538   -
539 518 if (tctx->flags & FLAGS_HMAC) {
540 519 struct omap_sham_hmac_ctx *bctx = tctx->base;
541 520  
... ... @@ -581,7 +560,7 @@
581 560 use_dma = 0;
582 561  
583 562 if (use_dma)
584   - err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
  563 + err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
585 564 else
586 565 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
587 566  
... ... @@ -615,6 +594,7 @@
615 594 static void omap_sham_finish_req(struct ahash_request *req, int err)
616 595 {
617 596 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
  597 + struct omap_sham_dev *dd = ctx->dd;
618 598  
619 599 if (!err) {
620 600 omap_sham_copy_hash(ctx->dd->req, 1);
... ... @@ -627,7 +607,8 @@
627 607 if ((ctx->flags & FLAGS_FINAL) || err)
628 608 omap_sham_cleanup(req);
629 609  
630   - ctx->dd->flags &= ~FLAGS_BUSY;
  610 + clk_disable(dd->iclk);
  611 + dd->flags &= ~FLAGS_BUSY;
631 612  
632 613 if (req->base.complete)
633 614 req->base.complete(&req->base, err);
... ... @@ -636,7 +617,7 @@
636 617 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
637 618 struct ahash_request *req)
638 619 {
639   - struct crypto_async_request *async_req, *backlog;
  620 + struct crypto_async_request *async_req, *backlog = 0;
640 621 struct omap_sham_reqctx *ctx;
641 622 struct ahash_request *prev_req;
642 623 unsigned long flags;
... ... @@ -672,7 +653,22 @@
672 653 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
673 654 ctx->op, req->nbytes);
674 655  
675   - if (req != prev_req && ctx->digcnt)
  656 +
  657 + err = omap_sham_hw_init(dd);
  658 + if (err)
  659 + goto err1;
  660 +
  661 + omap_set_dma_dest_params(dd->dma_lch, 0,
  662 + OMAP_DMA_AMODE_CONSTANT,
  663 + dd->phys_base + SHA_REG_DIN(0), 0, 16);
  664 +
  665 + omap_set_dma_dest_burst_mode(dd->dma_lch,
  666 + OMAP_DMA_DATA_BURST_16);
  667 +
  668 + omap_set_dma_src_burst_mode(dd->dma_lch,
  669 + OMAP_DMA_DATA_BURST_4);
  670 +
  671 + if (ctx->digcnt)
676 672 /* request has changed - restore hash */
677 673 omap_sham_copy_hash(req, 0);
678 674  
... ... @@ -684,7 +680,7 @@
684 680 } else if (ctx->op == OP_FINAL) {
685 681 err = omap_sham_final_req(dd);
686 682 }
687   -
  683 +err1:
688 684 if (err != -EINPROGRESS) {
689 685 /* done_task will not finish it, so do it here */
690 686 omap_sham_finish_req(req, err);
... ... @@ -868,7 +864,7 @@
868 864 }
869 865  
870 866 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
871   - sizeof(struct omap_sham_reqctx));
  867 + sizeof(struct omap_sham_reqctx) + BUFLEN);
872 868  
873 869 if (alg_base) {
874 870 struct omap_sham_hmac_ctx *bctx = tctx->base;
... ... @@ -954,7 +950,7 @@
954 950 CRYPTO_ALG_NEED_FALLBACK,
955 951 .cra_blocksize = SHA1_BLOCK_SIZE,
956 952 .cra_ctxsize = sizeof(struct omap_sham_ctx),
957   - .cra_alignmask = 0,
  953 + .cra_alignmask = OMAP_ALIGN_MASK,
958 954 .cra_module = THIS_MODULE,
959 955 .cra_init = omap_sham_cra_init,
960 956 .cra_exit = omap_sham_cra_exit,
... ... @@ -978,7 +974,7 @@
978 974 .cra_blocksize = SHA1_BLOCK_SIZE,
979 975 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
980 976 sizeof(struct omap_sham_hmac_ctx),
981   - .cra_alignmask = 0,
  977 + .cra_alignmask = OMAP_ALIGN_MASK,
982 978 .cra_module = THIS_MODULE,
983 979 .cra_init = omap_sham_cra_sha1_init,
984 980 .cra_exit = omap_sham_cra_exit,
... ... @@ -1002,7 +998,7 @@
1002 998 .cra_blocksize = SHA1_BLOCK_SIZE,
1003 999 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1004 1000 sizeof(struct omap_sham_hmac_ctx),
1005   - .cra_alignmask = 0,
  1001 + .cra_alignmask = OMAP_ALIGN_MASK,
1006 1002 .cra_module = THIS_MODULE,
1007 1003 .cra_init = omap_sham_cra_md5_init,
1008 1004 .cra_exit = omap_sham_cra_exit,