Commit 0d44dc59b2b434b29aafeae581d06f81efac7c83
Committed by Herbert Xu
1 parent f4f689933c
Exists in master and in 20 other branches
crypto: ixp4xx - Fix handling of chained sg buffers
- keep dma functions away from chained scatterlists. Use the existing
  scatterlist iteration inside the driver to call dma_map_single() for
  each chunk and avoid dma_map_sg().

Signed-off-by: Christian Hohnstaedt <chohnstaedt@innominate.com>
Tested-By: Karl Hiramoto <karl@hiramoto.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Showing 1 changed file with 63 additions and 119 deletions.
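The pattern the change adopts is worth stating outside the diff: walk the
(possibly chained) scatterlist chunk by chunk and map each piece with
dma_map_single(), instead of handing the whole list to dma_map_sg(). A
minimal sketch of that walk, assuming a made-up helper name map_chunks()
and caller-supplied dev/sg/nbytes/dir (the commit-era driver iterates with
scatterwalk_sg_next(); sg_next() is the later equivalent):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Sketch only, not the driver's literal code: map each chunk of a
     * possibly chained scatterlist individually, so no DMA function
     * ever has to follow the chain links itself. */
    static int map_chunks(struct device *dev, struct scatterlist *sg,
                          unsigned int nbytes, enum dma_data_direction dir)
    {
            for (; nbytes > 0; sg = sg_next(sg)) {
                    unsigned int len = min(nbytes, sg->length);
                    void *ptr = page_address(sg_page(sg)) + sg->offset;
                    dma_addr_t phys = dma_map_single(dev, ptr, len, dir);

                    if (dma_mapping_error(dev, phys))
                            return -ENOMEM; /* caller unmaps what was mapped */
                    /* record phys/len/dir in a per-chunk descriptor so the
                     * completion path can dma_unmap_single() each one */
                    nbytes -= len;
            }
            return 0;
    }

In the driver itself this walk lives inside the reworked chainup_buffers(),
which also allocates one buffer_desc per chunk from the dma_pool and records
the mapping direction in the new 'dir' field so free_buf_chain() can unmap.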
drivers/crypto/ixp4xx_crypto.c
| ... | ... | @@ -101,6 +101,7 @@ |
| 101 | 101 | u32 phys_addr; |
| 102 | 102 | u32 __reserved[4]; |
| 103 | 103 | struct buffer_desc *next; |
| 104 | + enum dma_data_direction dir; | |
| 104 | 105 | }; |
| 105 | 106 | |
| 106 | 107 | struct crypt_ctl { |
| ... | ... | @@ -132,14 +133,10 @@ |
| 132 | 133 | struct ablk_ctx { |
| 133 | 134 | struct buffer_desc *src; |
| 134 | 135 | struct buffer_desc *dst; |
| 135 | - unsigned src_nents; | |
| 136 | - unsigned dst_nents; | |
| 137 | 136 | }; |
| 138 | 137 | |
| 139 | 138 | struct aead_ctx { |
| 140 | 139 | struct buffer_desc *buffer; |
| 141 | - unsigned short assoc_nents; | |
| 142 | - unsigned short src_nents; | |
| 143 | 140 | struct scatterlist ivlist; |
| 144 | 141 | /* used when the hmac is not on one sg entry */ |
| 145 | 142 | u8 *hmac_virt; |
| ... | ... | @@ -312,7 +309,7 @@ |
| 312 | 309 | } |
| 313 | 310 | } |
| 314 | 311 | |
| 315 | -static void free_buf_chain(struct buffer_desc *buf, u32 phys) | |
| 312 | +static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) | |
| 316 | 313 | { |
| 317 | 314 | while (buf) { |
| 318 | 315 | struct buffer_desc *buf1; |
| ... | ... | @@ -320,6 +317,7 @@ |
| 320 | 317 | |
| 321 | 318 | buf1 = buf->next; |
| 322 | 319 | phys1 = buf->phys_next; |
| 320 | + dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir); | |
| 323 | 321 | dma_pool_free(buffer_pool, buf, phys); |
| 324 | 322 | buf = buf1; |
| 325 | 323 | phys = phys1; |
| ... | ... | @@ -348,7 +346,6 @@ |
| 348 | 346 | struct crypt_ctl *crypt; |
| 349 | 347 | struct ixp_ctx *ctx; |
| 350 | 348 | int failed; |
| 351 | - enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; | |
| 352 | 349 | |
| 353 | 350 | failed = phys & 0x1 ? -EBADMSG : 0; |
| 354 | 351 | phys &= ~0x3; |
| ... | ... | @@ -358,13 +355,8 @@ |
| 358 | 355 | case CTL_FLAG_PERFORM_AEAD: { |
| 359 | 356 | struct aead_request *req = crypt->data.aead_req; |
| 360 | 357 | struct aead_ctx *req_ctx = aead_request_ctx(req); |
| 361 | - dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, | |
| 362 | - DMA_TO_DEVICE); | |
| 363 | - dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL); | |
| 364 | - dma_unmap_sg(dev, req->src, req_ctx->src_nents, | |
| 365 | - DMA_BIDIRECTIONAL); | |
| 366 | 358 | |
| 367 | - free_buf_chain(req_ctx->buffer, crypt->src_buf); | |
| 359 | + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); | |
| 368 | 360 | if (req_ctx->hmac_virt) { |
| 369 | 361 | finish_scattered_hmac(crypt); |
| 370 | 362 | } |
| ... | ... | @@ -374,16 +366,11 @@ |
| 374 | 366 | case CTL_FLAG_PERFORM_ABLK: { |
| 375 | 367 | struct ablkcipher_request *req = crypt->data.ablk_req; |
| 376 | 368 | struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); |
| 377 | - int nents; | |
| 369 | + | |
| 378 | 370 | if (req_ctx->dst) { |
| 379 | - nents = req_ctx->dst_nents; | |
| 380 | - dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE); | |
| 381 | - free_buf_chain(req_ctx->dst, crypt->dst_buf); | |
| 382 | - src_direction = DMA_TO_DEVICE; | |
| 371 | + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); | |
| 383 | 372 | } |
| 384 | - nents = req_ctx->src_nents; | |
| 385 | - dma_unmap_sg(dev, req->src, nents, src_direction); | |
| 386 | - free_buf_chain(req_ctx->src, crypt->src_buf); | |
| 373 | + free_buf_chain(dev, req_ctx->src, crypt->src_buf); | |
| 387 | 374 | req->base.complete(&req->base, failed); |
| 388 | 375 | break; |
| 389 | 376 | } |
| ... | ... | @@ -750,56 +737,35 @@ |
| 750 | 737 | return 0; |
| 751 | 738 | } |
| 752 | 739 | |
| 753 | -static int count_sg(struct scatterlist *sg, int nbytes) | |
| 740 | +static struct buffer_desc *chainup_buffers(struct device *dev, | |
| 741 | + struct scatterlist *sg, unsigned nbytes, | |
| 742 | + struct buffer_desc *buf, gfp_t flags, | |
| 743 | + enum dma_data_direction dir) | |
| 754 | 744 | { |
| 755 | - int i; | |
| 756 | - for (i = 0; nbytes > 0; i++, sg = sg_next(sg)) | |
| 757 | - nbytes -= sg->length; | |
| 758 | - return i; | |
| 759 | -} | |
| 760 | - | |
| 761 | -static struct buffer_desc *chainup_buffers(struct scatterlist *sg, | |
| 762 | - unsigned nbytes, struct buffer_desc *buf, gfp_t flags) | |
| 763 | -{ | |
| 764 | - int nents = 0; | |
| 765 | - | |
| 766 | - while (nbytes > 0) { | |
| 745 | + for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) { | |
| 746 | + unsigned len = min(nbytes, sg->length); | |
| 767 | 747 | struct buffer_desc *next_buf; |
| 768 | 748 | u32 next_buf_phys; |
| 769 | - unsigned len = min(nbytes, sg_dma_len(sg)); | |
| 749 | + void *ptr; | |
| 770 | 750 | |
| 771 | - nents++; | |
| 772 | 751 | nbytes -= len; |
| 773 | - if (!buf->phys_addr) { | |
| 774 | - buf->phys_addr = sg_dma_address(sg); | |
| 775 | - buf->buf_len = len; | |
| 776 | - buf->next = NULL; | |
| 777 | - buf->phys_next = 0; | |
| 778 | - goto next; | |
| 779 | - } | |
| 780 | - /* Two consecutive chunks on one page may be handled by the old | |
| 781 | - * buffer descriptor, increased by the length of the new one | |
| 782 | - */ | |
| 783 | - if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) { | |
| 784 | - buf->buf_len += len; | |
| 785 | - goto next; | |
| 786 | - } | |
| 752 | + ptr = page_address(sg_page(sg)) + sg->offset; | |
| 787 | 753 | next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys); |
| 788 | - if (!next_buf) | |
| 789 | - return NULL; | |
| 754 | + if (!next_buf) { | |
| 755 | + buf = NULL; | |
| 756 | + break; | |
| 757 | + } | |
| 758 | + sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir); | |
| 790 | 759 | buf->next = next_buf; |
| 791 | 760 | buf->phys_next = next_buf_phys; |
| 792 | - | |
| 793 | 761 | buf = next_buf; |
| 794 | - buf->next = NULL; | |
| 795 | - buf->phys_next = 0; | |
| 762 | + | |
| 796 | 763 | buf->phys_addr = sg_dma_address(sg); |
| 797 | 764 | buf->buf_len = len; |
| 798 | -next: | |
| 799 | - if (nbytes > 0) { | |
| 800 | - sg = sg_next(sg); | |
| 801 | - } | |
| 765 | + buf->dir = dir; | |
| 802 | 766 | } |
| 767 | + buf->next = NULL; | |
| 768 | + buf->phys_next = 0; | |
| 803 | 769 | return buf; |
| 804 | 770 | } |
| 805 | 771 | |
| ... | ... | @@ -860,12 +826,12 @@ |
| 860 | 826 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
| 861 | 827 | struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
| 862 | 828 | unsigned ivsize = crypto_ablkcipher_ivsize(tfm); |
| 863 | - int ret = -ENOMEM; | |
| 864 | 829 | struct ix_sa_dir *dir; |
| 865 | 830 | struct crypt_ctl *crypt; |
| 866 | - unsigned int nbytes = req->nbytes, nents; | |
| 831 | + unsigned int nbytes = req->nbytes; | |
| 867 | 832 | enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; |
| 868 | 833 | struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); |
| 834 | + struct buffer_desc src_hook; | |
| 869 | 835 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
| 870 | 836 | GFP_KERNEL : GFP_ATOMIC; |
| 871 | 837 | |
| ... | ... | @@ -878,7 +844,7 @@ |
| 878 | 844 | |
| 879 | 845 | crypt = get_crypt_desc(); |
| 880 | 846 | if (!crypt) |
| 881 | - return ret; | |
| 847 | + return -ENOMEM; | |
| 882 | 848 | |
| 883 | 849 | crypt->data.ablk_req = req; |
| 884 | 850 | crypt->crypto_ctx = dir->npe_ctx_phys; |
| ... | ... | @@ -891,53 +857,41 @@ |
| 891 | 857 | BUG_ON(ivsize && !req->info); |
| 892 | 858 | memcpy(crypt->iv, req->info, ivsize); |
| 893 | 859 | if (req->src != req->dst) { |
| 860 | + struct buffer_desc dst_hook; | |
| 894 | 861 | crypt->mode |= NPE_OP_NOT_IN_PLACE; |
| 895 | - nents = count_sg(req->dst, nbytes); | |
| 896 | 862 | /* This was never tested by Intel |
| 897 | 863 | * for more than one dst buffer, I think. */ |
| 898 | - BUG_ON(nents != 1); | |
| 899 | - req_ctx->dst_nents = nents; | |
| 900 | - dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE); | |
| 901 | - req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf); | |
| 902 | - if (!req_ctx->dst) | |
| 903 | - goto unmap_sg_dest; | |
| 904 | - req_ctx->dst->phys_addr = 0; | |
| 905 | - if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags)) | |
| 864 | + BUG_ON(req->dst->length < nbytes); | |
| 865 | + req_ctx->dst = NULL; | |
| 866 | + if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, | |
| 867 | + flags, DMA_FROM_DEVICE)) | |
| 906 | 868 | goto free_buf_dest; |
| 907 | 869 | src_direction = DMA_TO_DEVICE; |
| 870 | + req_ctx->dst = dst_hook.next; | |
| 871 | + crypt->dst_buf = dst_hook.phys_next; | |
| 908 | 872 | } else { |
| 909 | 873 | req_ctx->dst = NULL; |
| 910 | - req_ctx->dst_nents = 0; | |
| 911 | 874 | } |
| 912 | - nents = count_sg(req->src, nbytes); | |
| 913 | - req_ctx->src_nents = nents; | |
| 914 | - dma_map_sg(dev, req->src, nents, src_direction); | |
| 915 | - | |
| 916 | - req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf); | |
| 917 | - if (!req_ctx->src) | |
| 918 | - goto unmap_sg_src; | |
| 919 | - req_ctx->src->phys_addr = 0; | |
| 920 | - if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags)) | |
| 875 | + req_ctx->src = NULL; | |
| 876 | + if (!chainup_buffers(dev, req->src, nbytes, &src_hook, | |
| 877 | + flags, src_direction)) | |
| 921 | 878 | goto free_buf_src; |
| 922 | 879 | |
| 880 | + req_ctx->src = src_hook.next; | |
| 881 | + crypt->src_buf = src_hook.phys_next; | |
| 923 | 882 | crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK; |
| 924 | 883 | qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); |
| 925 | 884 | BUG_ON(qmgr_stat_overflow(SEND_QID)); |
| 926 | 885 | return -EINPROGRESS; |
| 927 | 886 | |
| 928 | 887 | free_buf_src: |
| 929 | - free_buf_chain(req_ctx->src, crypt->src_buf); | |
| 930 | -unmap_sg_src: | |
| 931 | - dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction); | |
| 888 | + free_buf_chain(dev, req_ctx->src, crypt->src_buf); | |
| 932 | 889 | free_buf_dest: |
| 933 | 890 | if (req->src != req->dst) { |
| 934 | - free_buf_chain(req_ctx->dst, crypt->dst_buf); | |
| 935 | -unmap_sg_dest: | |
| 936 | - dma_unmap_sg(dev, req->src, req_ctx->dst_nents, | |
| 937 | - DMA_FROM_DEVICE); | |
| 891 | + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); | |
| 938 | 892 | } |
| 939 | 893 | crypt->ctl_flags = CTL_FLAG_UNUSED; |
| 940 | - return ret; | |
| 894 | + return -ENOMEM; | |
| 941 | 895 | } |
| 942 | 896 | |
| 943 | 897 | static int ablk_encrypt(struct ablkcipher_request *req) |
| ... | ... | @@ -985,7 +939,7 @@ |
| 985 | 939 | break; |
| 986 | 940 | |
| 987 | 941 | offset += sg->length; |
| 988 | - sg = sg_next(sg); | |
| 942 | + sg = scatterwalk_sg_next(sg); | |
| 989 | 943 | } |
| 990 | 944 | return (start + nbytes > offset + sg->length); |
| 991 | 945 | } |
| ... | ... | @@ -997,11 +951,10 @@ |
| 997 | 951 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); |
| 998 | 952 | unsigned ivsize = crypto_aead_ivsize(tfm); |
| 999 | 953 | unsigned authsize = crypto_aead_authsize(tfm); |
| 1000 | - int ret = -ENOMEM; | |
| 1001 | 954 | struct ix_sa_dir *dir; |
| 1002 | 955 | struct crypt_ctl *crypt; |
| 1003 | - unsigned int cryptlen, nents; | |
| 1004 | - struct buffer_desc *buf; | |
| 956 | + unsigned int cryptlen; | |
| 957 | + struct buffer_desc *buf, src_hook; | |
| 1005 | 958 | struct aead_ctx *req_ctx = aead_request_ctx(req); |
| 1006 | 959 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
| 1007 | 960 | GFP_KERNEL : GFP_ATOMIC; |
| ... | ... | @@ -1022,7 +975,7 @@ |
| 1022 | 975 | } |
| 1023 | 976 | crypt = get_crypt_desc(); |
| 1024 | 977 | if (!crypt) |
| 1025 | - return ret; | |
| 978 | + return -ENOMEM; | |
| 1026 | 979 | |
| 1027 | 980 | crypt->data.aead_req = req; |
| 1028 | 981 | crypt->crypto_ctx = dir->npe_ctx_phys; |
| ... | ... | @@ -1041,31 +994,27 @@ |
| 1041 | 994 | BUG(); /* -ENOTSUP because of my lazyness */ |
| 1042 | 995 | } |
| 1043 | 996 | |
| 1044 | - req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf); | |
| 1045 | - if (!req_ctx->buffer) | |
| 1046 | - goto out; | |
| 1047 | - req_ctx->buffer->phys_addr = 0; | |
| 1048 | 997 | /* ASSOC data */ |
| 1049 | - nents = count_sg(req->assoc, req->assoclen); | |
| 1050 | - req_ctx->assoc_nents = nents; | |
| 1051 | - dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE); | |
| 1052 | - buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags); | |
| 998 | + buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook, | |
| 999 | + flags, DMA_TO_DEVICE); | |
| 1000 | + req_ctx->buffer = src_hook.next; | |
| 1001 | + crypt->src_buf = src_hook.phys_next; | |
| 1053 | 1002 | if (!buf) |
| 1054 | - goto unmap_sg_assoc; | |
| 1003 | + goto out; | |
| 1055 | 1004 | /* IV */ |
| 1056 | 1005 | sg_init_table(&req_ctx->ivlist, 1); |
| 1057 | 1006 | sg_set_buf(&req_ctx->ivlist, iv, ivsize); |
| 1058 | - dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL); | |
| 1059 | - buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags); | |
| 1007 | + buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags, | |
| 1008 | + DMA_BIDIRECTIONAL); | |
| 1060 | 1009 | if (!buf) |
| 1061 | - goto unmap_sg_iv; | |
| 1010 | + goto free_chain; | |
| 1062 | 1011 | if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) { |
| 1063 | 1012 | /* The 12 hmac bytes are scattered, |
| 1064 | 1013 | * we need to copy them into a safe buffer */ |
| 1065 | 1014 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, |
| 1066 | 1015 | &crypt->icv_rev_aes); |
| 1067 | 1016 | if (unlikely(!req_ctx->hmac_virt)) |
| 1068 | - goto unmap_sg_iv; | |
| 1017 | + goto free_chain; | |
| 1069 | 1018 | if (!encrypt) { |
| 1070 | 1019 | scatterwalk_map_and_copy(req_ctx->hmac_virt, |
| 1071 | 1020 | req->src, cryptlen, authsize, 0); |
| ... | ... | @@ -1075,33 +1024,28 @@ |
| 1075 | 1024 | req_ctx->hmac_virt = NULL; |
| 1076 | 1025 | } |
| 1077 | 1026 | /* Crypt */ |
| 1078 | - nents = count_sg(req->src, cryptlen + authsize); | |
| 1079 | - req_ctx->src_nents = nents; | |
| 1080 | - dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); | |
| 1081 | - buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags); | |
| 1027 | + buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags, | |
| 1028 | + DMA_BIDIRECTIONAL); | |
| 1082 | 1029 | if (!buf) |
| 1083 | - goto unmap_sg_src; | |
| 1030 | + goto free_hmac_virt; | |
| 1084 | 1031 | if (!req_ctx->hmac_virt) { |
| 1085 | 1032 | crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize; |
| 1086 | 1033 | } |
| 1034 | + | |
| 1087 | 1035 | crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD; |
| 1088 | 1036 | qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); |
| 1089 | 1037 | BUG_ON(qmgr_stat_overflow(SEND_QID)); |
| 1090 | 1038 | return -EINPROGRESS; |
| 1091 | -unmap_sg_src: | |
| 1092 | - dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL); | |
| 1039 | +free_hmac_virt: | |
| 1093 | 1040 | if (req_ctx->hmac_virt) { |
| 1094 | 1041 | dma_pool_free(buffer_pool, req_ctx->hmac_virt, |
| 1095 | 1042 | crypt->icv_rev_aes); |
| 1096 | 1043 | } |
| 1097 | -unmap_sg_iv: | |
| 1098 | - dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL); | |
| 1099 | -unmap_sg_assoc: | |
| 1100 | - dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE); | |
| 1101 | - free_buf_chain(req_ctx->buffer, crypt->src_buf); | |
| 1044 | +free_chain: | |
| 1045 | + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); | |
| 1102 | 1046 | out: |
| 1103 | 1047 | crypt->ctl_flags = CTL_FLAG_UNUSED; |
| 1104 | - return ret; | |
| 1048 | + return -ENOMEM; | |
| 1105 | 1049 | } |
| 1106 | 1050 | |
| 1107 | 1051 | static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) |
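Taken together, the new build/teardown pairing can be sketched outside the
diff. This is illustrative only ('hook' and the 'err' label stand in for the
driver's src_hook/dst_hook and its goto targets): the perform paths seed
chainup_buffers() with an on-stack hook descriptor so that every real node
comes from the dma_pool, which is what allowed the old first-descriptor
special case in chainup_buffers() to be deleted. The teardown below unmaps
phys_addr, i.e. the address dma_map_single() returned, since that is the
value chainup_buffers() stores in each node:

    /* Build: the on-stack hook never reaches the pool; harvest its links. */
    struct buffer_desc hook;

    if (!chainup_buffers(dev, req->src, nbytes, &hook, flags, DMA_TO_DEVICE))
            goto err;                       /* error unwinding elided here */
    req_ctx->src = hook.next;               /* first pool-allocated node */
    crypt->src_buf = hook.phys_next;        /* its dma_pool bus handle */

    /* Teardown on completion, with buf = req_ctx->src, phys = crypt->src_buf:
     * unmap each chunk, then return its descriptor to the pool. */
    while (buf) {
            struct buffer_desc *next = buf->next;
            u32 phys_next = buf->phys_next;

            dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
            dma_pool_free(buffer_pool, buf, phys);
            buf = next;
            phys = phys_next;
    }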