Commit a760a6656e6f00bb0144a42a048cf0266646e22c
1 parent: bb402f16ec. Exists in master and in 20 other branches.
crypto: api - Fix module load deadlock with fallback algorithms
With the mandatory algorithm testing at registration, we have now created a deadlock with algorithms requiring fallbacks. This can happen if the module containing the algorithm requiring the fallback is loaded before the fallback module itself. The system will then try to test the new algorithm, find that it needs to load a fallback, and try to load that. As both algorithms share the same module alias, it can attempt to load the original algorithm again and block indefinitely.

As algorithms requiring fallbacks are a special case, we can fix this by giving them a different module alias than the rest. Then it's just a matter of using the right aliases according to which algorithm we're trying to find.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
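In concrete terms, the fix has two halves, sketched below as a condensed excerpt of the diff that follows (kernel-style C fragments lifted from the patched files, not standalone buildable code). The driver that needs a software fallback stops claiming the plain "aes" alias, and the lookup path in crypto/api.c avoids requesting the suffixed alias when the caller is itself resolving a fallback, so loading the fallback can never re-request the module that triggered it.

    /* Driver side (arch/s390/crypto/aes_s390.c): the accelerated module now
     * answers only to the "-all" alias, so a request_module("aes") issued
     * while its fallback is being resolved will not load aes_s390 again. */
    MODULE_ALIAS("aes-all");

    /* Core side (crypto/api.c, crypto_larval_lookup): on a lookup miss,
     * always request the plain name; request "<name>-all" in addition, but
     * never when the lookup itself is for a fallback (CRYPTO_ALG_NEED_FALLBACK
     * set in the mask, clear in the type), so resolving a fallback cannot
     * re-request the module that triggered it. */
    alg = crypto_alg_lookup(name, type, mask);
    if (!alg) {
            char tmp[CRYPTO_MAX_ALG_NAME];

            request_module(name);

            if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
                snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
                    request_module(tmp);

            alg = crypto_alg_lookup(name, type, mask);
    }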
Showing 4 changed files with 17 additions and 6 deletions (inline diff)
arch/s390/crypto/aes_s390.c
| 1 | /* | 1 | /* |
| 2 | * Cryptographic API. | 2 | * Cryptographic API. |
| 3 | * | 3 | * |
| 4 | * s390 implementation of the AES Cipher Algorithm. | 4 | * s390 implementation of the AES Cipher Algorithm. |
| 5 | * | 5 | * |
| 6 | * s390 Version: | 6 | * s390 Version: |
| 7 | * Copyright IBM Corp. 2005,2007 | 7 | * Copyright IBM Corp. 2005,2007 |
| 8 | * Author(s): Jan Glauber (jang@de.ibm.com) | 8 | * Author(s): Jan Glauber (jang@de.ibm.com) |
| 9 | * Sebastian Siewior (sebastian@breakpoint.cc> SW-Fallback | 9 | * Sebastian Siewior (sebastian@breakpoint.cc> SW-Fallback |
| 10 | * | 10 | * |
| 11 | * Derived from "crypto/aes_generic.c" | 11 | * Derived from "crypto/aes_generic.c" |
| 12 | * | 12 | * |
| 13 | * This program is free software; you can redistribute it and/or modify it | 13 | * This program is free software; you can redistribute it and/or modify it |
| 14 | * under the terms of the GNU General Public License as published by the Free | 14 | * under the terms of the GNU General Public License as published by the Free |
| 15 | * Software Foundation; either version 2 of the License, or (at your option) | 15 | * Software Foundation; either version 2 of the License, or (at your option) |
| 16 | * any later version. | 16 | * any later version. |
| 17 | * | 17 | * |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #define KMSG_COMPONENT "aes_s390" | 20 | #define KMSG_COMPONENT "aes_s390" |
| 21 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 21 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
| 22 | 22 | ||
| 23 | #include <crypto/aes.h> | 23 | #include <crypto/aes.h> |
| 24 | #include <crypto/algapi.h> | 24 | #include <crypto/algapi.h> |
| 25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
| 26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
| 28 | #include "crypt_s390.h" | 28 | #include "crypt_s390.h" |
| 29 | 29 | ||
| 30 | #define AES_KEYLEN_128 1 | 30 | #define AES_KEYLEN_128 1 |
| 31 | #define AES_KEYLEN_192 2 | 31 | #define AES_KEYLEN_192 2 |
| 32 | #define AES_KEYLEN_256 4 | 32 | #define AES_KEYLEN_256 4 |
| 33 | 33 | ||
| 34 | static char keylen_flag = 0; | 34 | static char keylen_flag = 0; |
| 35 | 35 | ||
| 36 | struct s390_aes_ctx { | 36 | struct s390_aes_ctx { |
| 37 | u8 iv[AES_BLOCK_SIZE]; | 37 | u8 iv[AES_BLOCK_SIZE]; |
| 38 | u8 key[AES_MAX_KEY_SIZE]; | 38 | u8 key[AES_MAX_KEY_SIZE]; |
| 39 | long enc; | 39 | long enc; |
| 40 | long dec; | 40 | long dec; |
| 41 | int key_len; | 41 | int key_len; |
| 42 | union { | 42 | union { |
| 43 | struct crypto_blkcipher *blk; | 43 | struct crypto_blkcipher *blk; |
| 44 | struct crypto_cipher *cip; | 44 | struct crypto_cipher *cip; |
| 45 | } fallback; | 45 | } fallback; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | /* | 48 | /* |
| 49 | * Check if the key_len is supported by the HW. | 49 | * Check if the key_len is supported by the HW. |
| 50 | * Returns 0 if it is, a positive number if it is not and software fallback is | 50 | * Returns 0 if it is, a positive number if it is not and software fallback is |
| 51 | * required or a negative number in case the key size is not valid | 51 | * required or a negative number in case the key size is not valid |
| 52 | */ | 52 | */ |
| 53 | static int need_fallback(unsigned int key_len) | 53 | static int need_fallback(unsigned int key_len) |
| 54 | { | 54 | { |
| 55 | switch (key_len) { | 55 | switch (key_len) { |
| 56 | case 16: | 56 | case 16: |
| 57 | if (!(keylen_flag & AES_KEYLEN_128)) | 57 | if (!(keylen_flag & AES_KEYLEN_128)) |
| 58 | return 1; | 58 | return 1; |
| 59 | break; | 59 | break; |
| 60 | case 24: | 60 | case 24: |
| 61 | if (!(keylen_flag & AES_KEYLEN_192)) | 61 | if (!(keylen_flag & AES_KEYLEN_192)) |
| 62 | return 1; | 62 | return 1; |
| 63 | break; | 63 | break; |
| 64 | case 32: | 64 | case 32: |
| 65 | if (!(keylen_flag & AES_KEYLEN_256)) | 65 | if (!(keylen_flag & AES_KEYLEN_256)) |
| 66 | return 1; | 66 | return 1; |
| 67 | break; | 67 | break; |
| 68 | default: | 68 | default: |
| 69 | return -1; | 69 | return -1; |
| 70 | break; | 70 | break; |
| 71 | } | 71 | } |
| 72 | return 0; | 72 | return 0; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key, | 75 | static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key, |
| 76 | unsigned int key_len) | 76 | unsigned int key_len) |
| 77 | { | 77 | { |
| 78 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 78 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 79 | int ret; | 79 | int ret; |
| 80 | 80 | ||
| 81 | sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 81 | sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
| 82 | sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & | 82 | sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & |
| 83 | CRYPTO_TFM_REQ_MASK); | 83 | CRYPTO_TFM_REQ_MASK); |
| 84 | 84 | ||
| 85 | ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); | 85 | ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); |
| 86 | if (ret) { | 86 | if (ret) { |
| 87 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 87 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
| 88 | tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & | 88 | tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & |
| 89 | CRYPTO_TFM_RES_MASK); | 89 | CRYPTO_TFM_RES_MASK); |
| 90 | } | 90 | } |
| 91 | return ret; | 91 | return ret; |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | 94 | static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, |
| 95 | unsigned int key_len) | 95 | unsigned int key_len) |
| 96 | { | 96 | { |
| 97 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 97 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 98 | u32 *flags = &tfm->crt_flags; | 98 | u32 *flags = &tfm->crt_flags; |
| 99 | int ret; | 99 | int ret; |
| 100 | 100 | ||
| 101 | ret = need_fallback(key_len); | 101 | ret = need_fallback(key_len); |
| 102 | if (ret < 0) { | 102 | if (ret < 0) { |
| 103 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 103 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
| 104 | return -EINVAL; | 104 | return -EINVAL; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | sctx->key_len = key_len; | 107 | sctx->key_len = key_len; |
| 108 | if (!ret) { | 108 | if (!ret) { |
| 109 | memcpy(sctx->key, in_key, key_len); | 109 | memcpy(sctx->key, in_key, key_len); |
| 110 | return 0; | 110 | return 0; |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | return setkey_fallback_cip(tfm, in_key, key_len); | 113 | return setkey_fallback_cip(tfm, in_key, key_len); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 116 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
| 117 | { | 117 | { |
| 118 | const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 118 | const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 119 | 119 | ||
| 120 | if (unlikely(need_fallback(sctx->key_len))) { | 120 | if (unlikely(need_fallback(sctx->key_len))) { |
| 121 | crypto_cipher_encrypt_one(sctx->fallback.cip, out, in); | 121 | crypto_cipher_encrypt_one(sctx->fallback.cip, out, in); |
| 122 | return; | 122 | return; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | switch (sctx->key_len) { | 125 | switch (sctx->key_len) { |
| 126 | case 16: | 126 | case 16: |
| 127 | crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, | 127 | crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, |
| 128 | AES_BLOCK_SIZE); | 128 | AES_BLOCK_SIZE); |
| 129 | break; | 129 | break; |
| 130 | case 24: | 130 | case 24: |
| 131 | crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, | 131 | crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, |
| 132 | AES_BLOCK_SIZE); | 132 | AES_BLOCK_SIZE); |
| 133 | break; | 133 | break; |
| 134 | case 32: | 134 | case 32: |
| 135 | crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, | 135 | crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, |
| 136 | AES_BLOCK_SIZE); | 136 | AES_BLOCK_SIZE); |
| 137 | break; | 137 | break; |
| 138 | } | 138 | } |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 141 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
| 142 | { | 142 | { |
| 143 | const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 143 | const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 144 | 144 | ||
| 145 | if (unlikely(need_fallback(sctx->key_len))) { | 145 | if (unlikely(need_fallback(sctx->key_len))) { |
| 146 | crypto_cipher_decrypt_one(sctx->fallback.cip, out, in); | 146 | crypto_cipher_decrypt_one(sctx->fallback.cip, out, in); |
| 147 | return; | 147 | return; |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | switch (sctx->key_len) { | 150 | switch (sctx->key_len) { |
| 151 | case 16: | 151 | case 16: |
| 152 | crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, | 152 | crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, |
| 153 | AES_BLOCK_SIZE); | 153 | AES_BLOCK_SIZE); |
| 154 | break; | 154 | break; |
| 155 | case 24: | 155 | case 24: |
| 156 | crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, | 156 | crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, |
| 157 | AES_BLOCK_SIZE); | 157 | AES_BLOCK_SIZE); |
| 158 | break; | 158 | break; |
| 159 | case 32: | 159 | case 32: |
| 160 | crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, | 160 | crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, |
| 161 | AES_BLOCK_SIZE); | 161 | AES_BLOCK_SIZE); |
| 162 | break; | 162 | break; |
| 163 | } | 163 | } |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | static int fallback_init_cip(struct crypto_tfm *tfm) | 166 | static int fallback_init_cip(struct crypto_tfm *tfm) |
| 167 | { | 167 | { |
| 168 | const char *name = tfm->__crt_alg->cra_name; | 168 | const char *name = tfm->__crt_alg->cra_name; |
| 169 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 169 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 170 | 170 | ||
| 171 | sctx->fallback.cip = crypto_alloc_cipher(name, 0, | 171 | sctx->fallback.cip = crypto_alloc_cipher(name, 0, |
| 172 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | 172 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); |
| 173 | 173 | ||
| 174 | if (IS_ERR(sctx->fallback.cip)) { | 174 | if (IS_ERR(sctx->fallback.cip)) { |
| 175 | pr_err("Allocating AES fallback algorithm %s failed\n", | 175 | pr_err("Allocating AES fallback algorithm %s failed\n", |
| 176 | name); | 176 | name); |
| 177 | return PTR_ERR(sctx->fallback.blk); | 177 | return PTR_ERR(sctx->fallback.blk); |
| 178 | } | 178 | } |
| 179 | 179 | ||
| 180 | return 0; | 180 | return 0; |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | static void fallback_exit_cip(struct crypto_tfm *tfm) | 183 | static void fallback_exit_cip(struct crypto_tfm *tfm) |
| 184 | { | 184 | { |
| 185 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 185 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 186 | 186 | ||
| 187 | crypto_free_cipher(sctx->fallback.cip); | 187 | crypto_free_cipher(sctx->fallback.cip); |
| 188 | sctx->fallback.cip = NULL; | 188 | sctx->fallback.cip = NULL; |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | static struct crypto_alg aes_alg = { | 191 | static struct crypto_alg aes_alg = { |
| 192 | .cra_name = "aes", | 192 | .cra_name = "aes", |
| 193 | .cra_driver_name = "aes-s390", | 193 | .cra_driver_name = "aes-s390", |
| 194 | .cra_priority = CRYPT_S390_PRIORITY, | 194 | .cra_priority = CRYPT_S390_PRIORITY, |
| 195 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER | | 195 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER | |
| 196 | CRYPTO_ALG_NEED_FALLBACK, | 196 | CRYPTO_ALG_NEED_FALLBACK, |
| 197 | .cra_blocksize = AES_BLOCK_SIZE, | 197 | .cra_blocksize = AES_BLOCK_SIZE, |
| 198 | .cra_ctxsize = sizeof(struct s390_aes_ctx), | 198 | .cra_ctxsize = sizeof(struct s390_aes_ctx), |
| 199 | .cra_module = THIS_MODULE, | 199 | .cra_module = THIS_MODULE, |
| 200 | .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), | 200 | .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), |
| 201 | .cra_init = fallback_init_cip, | 201 | .cra_init = fallback_init_cip, |
| 202 | .cra_exit = fallback_exit_cip, | 202 | .cra_exit = fallback_exit_cip, |
| 203 | .cra_u = { | 203 | .cra_u = { |
| 204 | .cipher = { | 204 | .cipher = { |
| 205 | .cia_min_keysize = AES_MIN_KEY_SIZE, | 205 | .cia_min_keysize = AES_MIN_KEY_SIZE, |
| 206 | .cia_max_keysize = AES_MAX_KEY_SIZE, | 206 | .cia_max_keysize = AES_MAX_KEY_SIZE, |
| 207 | .cia_setkey = aes_set_key, | 207 | .cia_setkey = aes_set_key, |
| 208 | .cia_encrypt = aes_encrypt, | 208 | .cia_encrypt = aes_encrypt, |
| 209 | .cia_decrypt = aes_decrypt, | 209 | .cia_decrypt = aes_decrypt, |
| 210 | } | 210 | } |
| 211 | } | 211 | } |
| 212 | }; | 212 | }; |
| 213 | 213 | ||
| 214 | static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key, | 214 | static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key, |
| 215 | unsigned int len) | 215 | unsigned int len) |
| 216 | { | 216 | { |
| 217 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 217 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 218 | unsigned int ret; | 218 | unsigned int ret; |
| 219 | 219 | ||
| 220 | sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 220 | sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
| 221 | sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & | 221 | sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & |
| 222 | CRYPTO_TFM_REQ_MASK); | 222 | CRYPTO_TFM_REQ_MASK); |
| 223 | 223 | ||
| 224 | ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len); | 224 | ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len); |
| 225 | if (ret) { | 225 | if (ret) { |
| 226 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 226 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
| 227 | tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & | 227 | tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & |
| 228 | CRYPTO_TFM_RES_MASK); | 228 | CRYPTO_TFM_RES_MASK); |
| 229 | } | 229 | } |
| 230 | return ret; | 230 | return ret; |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | static int fallback_blk_dec(struct blkcipher_desc *desc, | 233 | static int fallback_blk_dec(struct blkcipher_desc *desc, |
| 234 | struct scatterlist *dst, struct scatterlist *src, | 234 | struct scatterlist *dst, struct scatterlist *src, |
| 235 | unsigned int nbytes) | 235 | unsigned int nbytes) |
| 236 | { | 236 | { |
| 237 | unsigned int ret; | 237 | unsigned int ret; |
| 238 | struct crypto_blkcipher *tfm; | 238 | struct crypto_blkcipher *tfm; |
| 239 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); | 239 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); |
| 240 | 240 | ||
| 241 | tfm = desc->tfm; | 241 | tfm = desc->tfm; |
| 242 | desc->tfm = sctx->fallback.blk; | 242 | desc->tfm = sctx->fallback.blk; |
| 243 | 243 | ||
| 244 | ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); | 244 | ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); |
| 245 | 245 | ||
| 246 | desc->tfm = tfm; | 246 | desc->tfm = tfm; |
| 247 | return ret; | 247 | return ret; |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | static int fallback_blk_enc(struct blkcipher_desc *desc, | 250 | static int fallback_blk_enc(struct blkcipher_desc *desc, |
| 251 | struct scatterlist *dst, struct scatterlist *src, | 251 | struct scatterlist *dst, struct scatterlist *src, |
| 252 | unsigned int nbytes) | 252 | unsigned int nbytes) |
| 253 | { | 253 | { |
| 254 | unsigned int ret; | 254 | unsigned int ret; |
| 255 | struct crypto_blkcipher *tfm; | 255 | struct crypto_blkcipher *tfm; |
| 256 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); | 256 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); |
| 257 | 257 | ||
| 258 | tfm = desc->tfm; | 258 | tfm = desc->tfm; |
| 259 | desc->tfm = sctx->fallback.blk; | 259 | desc->tfm = sctx->fallback.blk; |
| 260 | 260 | ||
| 261 | ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); | 261 | ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); |
| 262 | 262 | ||
| 263 | desc->tfm = tfm; | 263 | desc->tfm = tfm; |
| 264 | return ret; | 264 | return ret; |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | 267 | static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, |
| 268 | unsigned int key_len) | 268 | unsigned int key_len) |
| 269 | { | 269 | { |
| 270 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 270 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 271 | int ret; | 271 | int ret; |
| 272 | 272 | ||
| 273 | ret = need_fallback(key_len); | 273 | ret = need_fallback(key_len); |
| 274 | if (ret > 0) { | 274 | if (ret > 0) { |
| 275 | sctx->key_len = key_len; | 275 | sctx->key_len = key_len; |
| 276 | return setkey_fallback_blk(tfm, in_key, key_len); | 276 | return setkey_fallback_blk(tfm, in_key, key_len); |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | switch (key_len) { | 279 | switch (key_len) { |
| 280 | case 16: | 280 | case 16: |
| 281 | sctx->enc = KM_AES_128_ENCRYPT; | 281 | sctx->enc = KM_AES_128_ENCRYPT; |
| 282 | sctx->dec = KM_AES_128_DECRYPT; | 282 | sctx->dec = KM_AES_128_DECRYPT; |
| 283 | break; | 283 | break; |
| 284 | case 24: | 284 | case 24: |
| 285 | sctx->enc = KM_AES_192_ENCRYPT; | 285 | sctx->enc = KM_AES_192_ENCRYPT; |
| 286 | sctx->dec = KM_AES_192_DECRYPT; | 286 | sctx->dec = KM_AES_192_DECRYPT; |
| 287 | break; | 287 | break; |
| 288 | case 32: | 288 | case 32: |
| 289 | sctx->enc = KM_AES_256_ENCRYPT; | 289 | sctx->enc = KM_AES_256_ENCRYPT; |
| 290 | sctx->dec = KM_AES_256_DECRYPT; | 290 | sctx->dec = KM_AES_256_DECRYPT; |
| 291 | break; | 291 | break; |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | return aes_set_key(tfm, in_key, key_len); | 294 | return aes_set_key(tfm, in_key, key_len); |
| 295 | } | 295 | } |
| 296 | 296 | ||
| 297 | static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param, | 297 | static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param, |
| 298 | struct blkcipher_walk *walk) | 298 | struct blkcipher_walk *walk) |
| 299 | { | 299 | { |
| 300 | int ret = blkcipher_walk_virt(desc, walk); | 300 | int ret = blkcipher_walk_virt(desc, walk); |
| 301 | unsigned int nbytes; | 301 | unsigned int nbytes; |
| 302 | 302 | ||
| 303 | while ((nbytes = walk->nbytes)) { | 303 | while ((nbytes = walk->nbytes)) { |
| 304 | /* only use complete blocks */ | 304 | /* only use complete blocks */ |
| 305 | unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); | 305 | unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); |
| 306 | u8 *out = walk->dst.virt.addr; | 306 | u8 *out = walk->dst.virt.addr; |
| 307 | u8 *in = walk->src.virt.addr; | 307 | u8 *in = walk->src.virt.addr; |
| 308 | 308 | ||
| 309 | ret = crypt_s390_km(func, param, out, in, n); | 309 | ret = crypt_s390_km(func, param, out, in, n); |
| 310 | BUG_ON((ret < 0) || (ret != n)); | 310 | BUG_ON((ret < 0) || (ret != n)); |
| 311 | 311 | ||
| 312 | nbytes &= AES_BLOCK_SIZE - 1; | 312 | nbytes &= AES_BLOCK_SIZE - 1; |
| 313 | ret = blkcipher_walk_done(desc, walk, nbytes); | 313 | ret = blkcipher_walk_done(desc, walk, nbytes); |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | return ret; | 316 | return ret; |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | static int ecb_aes_encrypt(struct blkcipher_desc *desc, | 319 | static int ecb_aes_encrypt(struct blkcipher_desc *desc, |
| 320 | struct scatterlist *dst, struct scatterlist *src, | 320 | struct scatterlist *dst, struct scatterlist *src, |
| 321 | unsigned int nbytes) | 321 | unsigned int nbytes) |
| 322 | { | 322 | { |
| 323 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); | 323 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); |
| 324 | struct blkcipher_walk walk; | 324 | struct blkcipher_walk walk; |
| 325 | 325 | ||
| 326 | if (unlikely(need_fallback(sctx->key_len))) | 326 | if (unlikely(need_fallback(sctx->key_len))) |
| 327 | return fallback_blk_enc(desc, dst, src, nbytes); | 327 | return fallback_blk_enc(desc, dst, src, nbytes); |
| 328 | 328 | ||
| 329 | blkcipher_walk_init(&walk, dst, src, nbytes); | 329 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 330 | return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); | 330 | return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | static int ecb_aes_decrypt(struct blkcipher_desc *desc, | 333 | static int ecb_aes_decrypt(struct blkcipher_desc *desc, |
| 334 | struct scatterlist *dst, struct scatterlist *src, | 334 | struct scatterlist *dst, struct scatterlist *src, |
| 335 | unsigned int nbytes) | 335 | unsigned int nbytes) |
| 336 | { | 336 | { |
| 337 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); | 337 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); |
| 338 | struct blkcipher_walk walk; | 338 | struct blkcipher_walk walk; |
| 339 | 339 | ||
| 340 | if (unlikely(need_fallback(sctx->key_len))) | 340 | if (unlikely(need_fallback(sctx->key_len))) |
| 341 | return fallback_blk_dec(desc, dst, src, nbytes); | 341 | return fallback_blk_dec(desc, dst, src, nbytes); |
| 342 | 342 | ||
| 343 | blkcipher_walk_init(&walk, dst, src, nbytes); | 343 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 344 | return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); | 344 | return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); |
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | static int fallback_init_blk(struct crypto_tfm *tfm) | 347 | static int fallback_init_blk(struct crypto_tfm *tfm) |
| 348 | { | 348 | { |
| 349 | const char *name = tfm->__crt_alg->cra_name; | 349 | const char *name = tfm->__crt_alg->cra_name; |
| 350 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 350 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 351 | 351 | ||
| 352 | sctx->fallback.blk = crypto_alloc_blkcipher(name, 0, | 352 | sctx->fallback.blk = crypto_alloc_blkcipher(name, 0, |
| 353 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | 353 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); |
| 354 | 354 | ||
| 355 | if (IS_ERR(sctx->fallback.blk)) { | 355 | if (IS_ERR(sctx->fallback.blk)) { |
| 356 | pr_err("Allocating AES fallback algorithm %s failed\n", | 356 | pr_err("Allocating AES fallback algorithm %s failed\n", |
| 357 | name); | 357 | name); |
| 358 | return PTR_ERR(sctx->fallback.blk); | 358 | return PTR_ERR(sctx->fallback.blk); |
| 359 | } | 359 | } |
| 360 | 360 | ||
| 361 | return 0; | 361 | return 0; |
| 362 | } | 362 | } |
| 363 | 363 | ||
| 364 | static void fallback_exit_blk(struct crypto_tfm *tfm) | 364 | static void fallback_exit_blk(struct crypto_tfm *tfm) |
| 365 | { | 365 | { |
| 366 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 366 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 367 | 367 | ||
| 368 | crypto_free_blkcipher(sctx->fallback.blk); | 368 | crypto_free_blkcipher(sctx->fallback.blk); |
| 369 | sctx->fallback.blk = NULL; | 369 | sctx->fallback.blk = NULL; |
| 370 | } | 370 | } |
| 371 | 371 | ||
| 372 | static struct crypto_alg ecb_aes_alg = { | 372 | static struct crypto_alg ecb_aes_alg = { |
| 373 | .cra_name = "ecb(aes)", | 373 | .cra_name = "ecb(aes)", |
| 374 | .cra_driver_name = "ecb-aes-s390", | 374 | .cra_driver_name = "ecb-aes-s390", |
| 375 | .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, | 375 | .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, |
| 376 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 376 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | |
| 377 | CRYPTO_ALG_NEED_FALLBACK, | 377 | CRYPTO_ALG_NEED_FALLBACK, |
| 378 | .cra_blocksize = AES_BLOCK_SIZE, | 378 | .cra_blocksize = AES_BLOCK_SIZE, |
| 379 | .cra_ctxsize = sizeof(struct s390_aes_ctx), | 379 | .cra_ctxsize = sizeof(struct s390_aes_ctx), |
| 380 | .cra_type = &crypto_blkcipher_type, | 380 | .cra_type = &crypto_blkcipher_type, |
| 381 | .cra_module = THIS_MODULE, | 381 | .cra_module = THIS_MODULE, |
| 382 | .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), | 382 | .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), |
| 383 | .cra_init = fallback_init_blk, | 383 | .cra_init = fallback_init_blk, |
| 384 | .cra_exit = fallback_exit_blk, | 384 | .cra_exit = fallback_exit_blk, |
| 385 | .cra_u = { | 385 | .cra_u = { |
| 386 | .blkcipher = { | 386 | .blkcipher = { |
| 387 | .min_keysize = AES_MIN_KEY_SIZE, | 387 | .min_keysize = AES_MIN_KEY_SIZE, |
| 388 | .max_keysize = AES_MAX_KEY_SIZE, | 388 | .max_keysize = AES_MAX_KEY_SIZE, |
| 389 | .setkey = ecb_aes_set_key, | 389 | .setkey = ecb_aes_set_key, |
| 390 | .encrypt = ecb_aes_encrypt, | 390 | .encrypt = ecb_aes_encrypt, |
| 391 | .decrypt = ecb_aes_decrypt, | 391 | .decrypt = ecb_aes_decrypt, |
| 392 | } | 392 | } |
| 393 | } | 393 | } |
| 394 | }; | 394 | }; |
| 395 | 395 | ||
| 396 | static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | 396 | static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, |
| 397 | unsigned int key_len) | 397 | unsigned int key_len) |
| 398 | { | 398 | { |
| 399 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 399 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
| 400 | int ret; | 400 | int ret; |
| 401 | 401 | ||
| 402 | ret = need_fallback(key_len); | 402 | ret = need_fallback(key_len); |
| 403 | if (ret > 0) { | 403 | if (ret > 0) { |
| 404 | sctx->key_len = key_len; | 404 | sctx->key_len = key_len; |
| 405 | return setkey_fallback_blk(tfm, in_key, key_len); | 405 | return setkey_fallback_blk(tfm, in_key, key_len); |
| 406 | } | 406 | } |
| 407 | 407 | ||
| 408 | switch (key_len) { | 408 | switch (key_len) { |
| 409 | case 16: | 409 | case 16: |
| 410 | sctx->enc = KMC_AES_128_ENCRYPT; | 410 | sctx->enc = KMC_AES_128_ENCRYPT; |
| 411 | sctx->dec = KMC_AES_128_DECRYPT; | 411 | sctx->dec = KMC_AES_128_DECRYPT; |
| 412 | break; | 412 | break; |
| 413 | case 24: | 413 | case 24: |
| 414 | sctx->enc = KMC_AES_192_ENCRYPT; | 414 | sctx->enc = KMC_AES_192_ENCRYPT; |
| 415 | sctx->dec = KMC_AES_192_DECRYPT; | 415 | sctx->dec = KMC_AES_192_DECRYPT; |
| 416 | break; | 416 | break; |
| 417 | case 32: | 417 | case 32: |
| 418 | sctx->enc = KMC_AES_256_ENCRYPT; | 418 | sctx->enc = KMC_AES_256_ENCRYPT; |
| 419 | sctx->dec = KMC_AES_256_DECRYPT; | 419 | sctx->dec = KMC_AES_256_DECRYPT; |
| 420 | break; | 420 | break; |
| 421 | } | 421 | } |
| 422 | 422 | ||
| 423 | return aes_set_key(tfm, in_key, key_len); | 423 | return aes_set_key(tfm, in_key, key_len); |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, | 426 | static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, |
| 427 | struct blkcipher_walk *walk) | 427 | struct blkcipher_walk *walk) |
| 428 | { | 428 | { |
| 429 | int ret = blkcipher_walk_virt(desc, walk); | 429 | int ret = blkcipher_walk_virt(desc, walk); |
| 430 | unsigned int nbytes = walk->nbytes; | 430 | unsigned int nbytes = walk->nbytes; |
| 431 | 431 | ||
| 432 | if (!nbytes) | 432 | if (!nbytes) |
| 433 | goto out; | 433 | goto out; |
| 434 | 434 | ||
| 435 | memcpy(param, walk->iv, AES_BLOCK_SIZE); | 435 | memcpy(param, walk->iv, AES_BLOCK_SIZE); |
| 436 | do { | 436 | do { |
| 437 | /* only use complete blocks */ | 437 | /* only use complete blocks */ |
| 438 | unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); | 438 | unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); |
| 439 | u8 *out = walk->dst.virt.addr; | 439 | u8 *out = walk->dst.virt.addr; |
| 440 | u8 *in = walk->src.virt.addr; | 440 | u8 *in = walk->src.virt.addr; |
| 441 | 441 | ||
| 442 | ret = crypt_s390_kmc(func, param, out, in, n); | 442 | ret = crypt_s390_kmc(func, param, out, in, n); |
| 443 | BUG_ON((ret < 0) || (ret != n)); | 443 | BUG_ON((ret < 0) || (ret != n)); |
| 444 | 444 | ||
| 445 | nbytes &= AES_BLOCK_SIZE - 1; | 445 | nbytes &= AES_BLOCK_SIZE - 1; |
| 446 | ret = blkcipher_walk_done(desc, walk, nbytes); | 446 | ret = blkcipher_walk_done(desc, walk, nbytes); |
| 447 | } while ((nbytes = walk->nbytes)); | 447 | } while ((nbytes = walk->nbytes)); |
| 448 | memcpy(walk->iv, param, AES_BLOCK_SIZE); | 448 | memcpy(walk->iv, param, AES_BLOCK_SIZE); |
| 449 | 449 | ||
| 450 | out: | 450 | out: |
| 451 | return ret; | 451 | return ret; |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | static int cbc_aes_encrypt(struct blkcipher_desc *desc, | 454 | static int cbc_aes_encrypt(struct blkcipher_desc *desc, |
| 455 | struct scatterlist *dst, struct scatterlist *src, | 455 | struct scatterlist *dst, struct scatterlist *src, |
| 456 | unsigned int nbytes) | 456 | unsigned int nbytes) |
| 457 | { | 457 | { |
| 458 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); | 458 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); |
| 459 | struct blkcipher_walk walk; | 459 | struct blkcipher_walk walk; |
| 460 | 460 | ||
| 461 | if (unlikely(need_fallback(sctx->key_len))) | 461 | if (unlikely(need_fallback(sctx->key_len))) |
| 462 | return fallback_blk_enc(desc, dst, src, nbytes); | 462 | return fallback_blk_enc(desc, dst, src, nbytes); |
| 463 | 463 | ||
| 464 | blkcipher_walk_init(&walk, dst, src, nbytes); | 464 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 465 | return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); | 465 | return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); |
| 466 | } | 466 | } |
| 467 | 467 | ||
| 468 | static int cbc_aes_decrypt(struct blkcipher_desc *desc, | 468 | static int cbc_aes_decrypt(struct blkcipher_desc *desc, |
| 469 | struct scatterlist *dst, struct scatterlist *src, | 469 | struct scatterlist *dst, struct scatterlist *src, |
| 470 | unsigned int nbytes) | 470 | unsigned int nbytes) |
| 471 | { | 471 | { |
| 472 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); | 472 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); |
| 473 | struct blkcipher_walk walk; | 473 | struct blkcipher_walk walk; |
| 474 | 474 | ||
| 475 | if (unlikely(need_fallback(sctx->key_len))) | 475 | if (unlikely(need_fallback(sctx->key_len))) |
| 476 | return fallback_blk_dec(desc, dst, src, nbytes); | 476 | return fallback_blk_dec(desc, dst, src, nbytes); |
| 477 | 477 | ||
| 478 | blkcipher_walk_init(&walk, dst, src, nbytes); | 478 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 479 | return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); | 479 | return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); |
| 480 | } | 480 | } |
| 481 | 481 | ||
| 482 | static struct crypto_alg cbc_aes_alg = { | 482 | static struct crypto_alg cbc_aes_alg = { |
| 483 | .cra_name = "cbc(aes)", | 483 | .cra_name = "cbc(aes)", |
| 484 | .cra_driver_name = "cbc-aes-s390", | 484 | .cra_driver_name = "cbc-aes-s390", |
| 485 | .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, | 485 | .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, |
| 486 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 486 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | |
| 487 | CRYPTO_ALG_NEED_FALLBACK, | 487 | CRYPTO_ALG_NEED_FALLBACK, |
| 488 | .cra_blocksize = AES_BLOCK_SIZE, | 488 | .cra_blocksize = AES_BLOCK_SIZE, |
| 489 | .cra_ctxsize = sizeof(struct s390_aes_ctx), | 489 | .cra_ctxsize = sizeof(struct s390_aes_ctx), |
| 490 | .cra_type = &crypto_blkcipher_type, | 490 | .cra_type = &crypto_blkcipher_type, |
| 491 | .cra_module = THIS_MODULE, | 491 | .cra_module = THIS_MODULE, |
| 492 | .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), | 492 | .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), |
| 493 | .cra_init = fallback_init_blk, | 493 | .cra_init = fallback_init_blk, |
| 494 | .cra_exit = fallback_exit_blk, | 494 | .cra_exit = fallback_exit_blk, |
| 495 | .cra_u = { | 495 | .cra_u = { |
| 496 | .blkcipher = { | 496 | .blkcipher = { |
| 497 | .min_keysize = AES_MIN_KEY_SIZE, | 497 | .min_keysize = AES_MIN_KEY_SIZE, |
| 498 | .max_keysize = AES_MAX_KEY_SIZE, | 498 | .max_keysize = AES_MAX_KEY_SIZE, |
| 499 | .ivsize = AES_BLOCK_SIZE, | 499 | .ivsize = AES_BLOCK_SIZE, |
| 500 | .setkey = cbc_aes_set_key, | 500 | .setkey = cbc_aes_set_key, |
| 501 | .encrypt = cbc_aes_encrypt, | 501 | .encrypt = cbc_aes_encrypt, |
| 502 | .decrypt = cbc_aes_decrypt, | 502 | .decrypt = cbc_aes_decrypt, |
| 503 | } | 503 | } |
| 504 | } | 504 | } |
| 505 | }; | 505 | }; |
| 506 | 506 | ||
| 507 | static int __init aes_s390_init(void) | 507 | static int __init aes_s390_init(void) |
| 508 | { | 508 | { |
| 509 | int ret; | 509 | int ret; |
| 510 | 510 | ||
| 511 | if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) | 511 | if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) |
| 512 | keylen_flag |= AES_KEYLEN_128; | 512 | keylen_flag |= AES_KEYLEN_128; |
| 513 | if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) | 513 | if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) |
| 514 | keylen_flag |= AES_KEYLEN_192; | 514 | keylen_flag |= AES_KEYLEN_192; |
| 515 | if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) | 515 | if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) |
| 516 | keylen_flag |= AES_KEYLEN_256; | 516 | keylen_flag |= AES_KEYLEN_256; |
| 517 | 517 | ||
| 518 | if (!keylen_flag) | 518 | if (!keylen_flag) |
| 519 | return -EOPNOTSUPP; | 519 | return -EOPNOTSUPP; |
| 520 | 520 | ||
| 521 | /* z9 109 and z9 BC/EC only support 128 bit key length */ | 521 | /* z9 109 and z9 BC/EC only support 128 bit key length */ |
| 522 | if (keylen_flag == AES_KEYLEN_128) | 522 | if (keylen_flag == AES_KEYLEN_128) |
| 523 | pr_info("AES hardware acceleration is only available for" | 523 | pr_info("AES hardware acceleration is only available for" |
| 524 | " 128-bit keys\n"); | 524 | " 128-bit keys\n"); |
| 525 | 525 | ||
| 526 | ret = crypto_register_alg(&aes_alg); | 526 | ret = crypto_register_alg(&aes_alg); |
| 527 | if (ret) | 527 | if (ret) |
| 528 | goto aes_err; | 528 | goto aes_err; |
| 529 | 529 | ||
| 530 | ret = crypto_register_alg(&ecb_aes_alg); | 530 | ret = crypto_register_alg(&ecb_aes_alg); |
| 531 | if (ret) | 531 | if (ret) |
| 532 | goto ecb_aes_err; | 532 | goto ecb_aes_err; |
| 533 | 533 | ||
| 534 | ret = crypto_register_alg(&cbc_aes_alg); | 534 | ret = crypto_register_alg(&cbc_aes_alg); |
| 535 | if (ret) | 535 | if (ret) |
| 536 | goto cbc_aes_err; | 536 | goto cbc_aes_err; |
| 537 | 537 | ||
| 538 | out: | 538 | out: |
| 539 | return ret; | 539 | return ret; |
| 540 | 540 | ||
| 541 | cbc_aes_err: | 541 | cbc_aes_err: |
| 542 | crypto_unregister_alg(&ecb_aes_alg); | 542 | crypto_unregister_alg(&ecb_aes_alg); |
| 543 | ecb_aes_err: | 543 | ecb_aes_err: |
| 544 | crypto_unregister_alg(&aes_alg); | 544 | crypto_unregister_alg(&aes_alg); |
| 545 | aes_err: | 545 | aes_err: |
| 546 | goto out; | 546 | goto out; |
| 547 | } | 547 | } |
| 548 | 548 | ||
| 549 | static void __exit aes_s390_fini(void) | 549 | static void __exit aes_s390_fini(void) |
| 550 | { | 550 | { |
| 551 | crypto_unregister_alg(&cbc_aes_alg); | 551 | crypto_unregister_alg(&cbc_aes_alg); |
| 552 | crypto_unregister_alg(&ecb_aes_alg); | 552 | crypto_unregister_alg(&ecb_aes_alg); |
| 553 | crypto_unregister_alg(&aes_alg); | 553 | crypto_unregister_alg(&aes_alg); |
| 554 | } | 554 | } |
| 555 | 555 | ||
| 556 | module_init(aes_s390_init); | 556 | module_init(aes_s390_init); |
| 557 | module_exit(aes_s390_fini); | 557 | module_exit(aes_s390_fini); |
| 558 | 558 | ||
| 559 | MODULE_ALIAS("aes"); | 559 | MODULE_ALIAS("aes-all"); |
| 560 | 560 | ||
| 561 | MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); | 561 | MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); |
| 562 | MODULE_LICENSE("GPL"); | 562 | MODULE_LICENSE("GPL"); |
| 563 | 563 |
crypto/api.c
| 1 | /* | 1 | /* |
| 2 | * Scatterlist Cryptographic API. | 2 | * Scatterlist Cryptographic API. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> |
| 5 | * Copyright (c) 2002 David S. Miller (davem@redhat.com) | 5 | * Copyright (c) 2002 David S. Miller (davem@redhat.com) |
| 6 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> | 6 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> |
| 7 | * | 7 | * |
| 8 | * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> | 8 | * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> |
| 9 | * and Nettle, by Niels Möller. | 9 | * and Nettle, by Niels Möller. |
| 10 | * | 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
| 12 | * under the terms of the GNU General Public License as published by the Free | 12 | * under the terms of the GNU General Public License as published by the Free |
| 13 | * Software Foundation; either version 2 of the License, or (at your option) | 13 | * Software Foundation; either version 2 of the License, or (at your option) |
| 14 | * any later version. | 14 | * any later version. |
| 15 | * | 15 | * |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
| 20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| 21 | #include <linux/kmod.h> | 21 | #include <linux/kmod.h> |
| 22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
| 23 | #include <linux/param.h> | 23 | #include <linux/param.h> |
| 24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
| 27 | #include "internal.h" | 27 | #include "internal.h" |
| 28 | 28 | ||
| 29 | LIST_HEAD(crypto_alg_list); | 29 | LIST_HEAD(crypto_alg_list); |
| 30 | EXPORT_SYMBOL_GPL(crypto_alg_list); | 30 | EXPORT_SYMBOL_GPL(crypto_alg_list); |
| 31 | DECLARE_RWSEM(crypto_alg_sem); | 31 | DECLARE_RWSEM(crypto_alg_sem); |
| 32 | EXPORT_SYMBOL_GPL(crypto_alg_sem); | 32 | EXPORT_SYMBOL_GPL(crypto_alg_sem); |
| 33 | 33 | ||
| 34 | BLOCKING_NOTIFIER_HEAD(crypto_chain); | 34 | BLOCKING_NOTIFIER_HEAD(crypto_chain); |
| 35 | EXPORT_SYMBOL_GPL(crypto_chain); | 35 | EXPORT_SYMBOL_GPL(crypto_chain); |
| 36 | 36 | ||
| 37 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) | 37 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) |
| 38 | { | 38 | { |
| 39 | atomic_inc(&alg->cra_refcnt); | 39 | atomic_inc(&alg->cra_refcnt); |
| 40 | return alg; | 40 | return alg; |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) | 43 | struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) |
| 44 | { | 44 | { |
| 45 | return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; | 45 | return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; |
| 46 | } | 46 | } |
| 47 | EXPORT_SYMBOL_GPL(crypto_mod_get); | 47 | EXPORT_SYMBOL_GPL(crypto_mod_get); |
| 48 | 48 | ||
| 49 | void crypto_mod_put(struct crypto_alg *alg) | 49 | void crypto_mod_put(struct crypto_alg *alg) |
| 50 | { | 50 | { |
| 51 | struct module *module = alg->cra_module; | 51 | struct module *module = alg->cra_module; |
| 52 | 52 | ||
| 53 | crypto_alg_put(alg); | 53 | crypto_alg_put(alg); |
| 54 | module_put(module); | 54 | module_put(module); |
| 55 | } | 55 | } |
| 56 | EXPORT_SYMBOL_GPL(crypto_mod_put); | 56 | EXPORT_SYMBOL_GPL(crypto_mod_put); |
| 57 | 57 | ||
| 58 | static inline int crypto_is_test_larval(struct crypto_larval *larval) | 58 | static inline int crypto_is_test_larval(struct crypto_larval *larval) |
| 59 | { | 59 | { |
| 60 | return larval->alg.cra_driver_name[0]; | 60 | return larval->alg.cra_driver_name[0]; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, | 63 | static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, |
| 64 | u32 mask) | 64 | u32 mask) |
| 65 | { | 65 | { |
| 66 | struct crypto_alg *q, *alg = NULL; | 66 | struct crypto_alg *q, *alg = NULL; |
| 67 | int best = -2; | 67 | int best = -2; |
| 68 | 68 | ||
| 69 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 69 | list_for_each_entry(q, &crypto_alg_list, cra_list) { |
| 70 | int exact, fuzzy; | 70 | int exact, fuzzy; |
| 71 | 71 | ||
| 72 | if (crypto_is_moribund(q)) | 72 | if (crypto_is_moribund(q)) |
| 73 | continue; | 73 | continue; |
| 74 | 74 | ||
| 75 | if ((q->cra_flags ^ type) & mask) | 75 | if ((q->cra_flags ^ type) & mask) |
| 76 | continue; | 76 | continue; |
| 77 | 77 | ||
| 78 | if (crypto_is_larval(q) && | 78 | if (crypto_is_larval(q) && |
| 79 | !crypto_is_test_larval((struct crypto_larval *)q) && | 79 | !crypto_is_test_larval((struct crypto_larval *)q) && |
| 80 | ((struct crypto_larval *)q)->mask != mask) | 80 | ((struct crypto_larval *)q)->mask != mask) |
| 81 | continue; | 81 | continue; |
| 82 | 82 | ||
| 83 | exact = !strcmp(q->cra_driver_name, name); | 83 | exact = !strcmp(q->cra_driver_name, name); |
| 84 | fuzzy = !strcmp(q->cra_name, name); | 84 | fuzzy = !strcmp(q->cra_name, name); |
| 85 | if (!exact && !(fuzzy && q->cra_priority > best)) | 85 | if (!exact && !(fuzzy && q->cra_priority > best)) |
| 86 | continue; | 86 | continue; |
| 87 | 87 | ||
| 88 | if (unlikely(!crypto_mod_get(q))) | 88 | if (unlikely(!crypto_mod_get(q))) |
| 89 | continue; | 89 | continue; |
| 90 | 90 | ||
| 91 | best = q->cra_priority; | 91 | best = q->cra_priority; |
| 92 | if (alg) | 92 | if (alg) |
| 93 | crypto_mod_put(alg); | 93 | crypto_mod_put(alg); |
| 94 | alg = q; | 94 | alg = q; |
| 95 | 95 | ||
| 96 | if (exact) | 96 | if (exact) |
| 97 | break; | 97 | break; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | return alg; | 100 | return alg; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static void crypto_larval_destroy(struct crypto_alg *alg) | 103 | static void crypto_larval_destroy(struct crypto_alg *alg) |
| 104 | { | 104 | { |
| 105 | struct crypto_larval *larval = (void *)alg; | 105 | struct crypto_larval *larval = (void *)alg; |
| 106 | 106 | ||
| 107 | BUG_ON(!crypto_is_larval(alg)); | 107 | BUG_ON(!crypto_is_larval(alg)); |
| 108 | if (larval->adult) | 108 | if (larval->adult) |
| 109 | crypto_mod_put(larval->adult); | 109 | crypto_mod_put(larval->adult); |
| 110 | kfree(larval); | 110 | kfree(larval); |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) | 113 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) |
| 114 | { | 114 | { |
| 115 | struct crypto_larval *larval; | 115 | struct crypto_larval *larval; |
| 116 | 116 | ||
| 117 | larval = kzalloc(sizeof(*larval), GFP_KERNEL); | 117 | larval = kzalloc(sizeof(*larval), GFP_KERNEL); |
| 118 | if (!larval) | 118 | if (!larval) |
| 119 | return ERR_PTR(-ENOMEM); | 119 | return ERR_PTR(-ENOMEM); |
| 120 | 120 | ||
| 121 | larval->mask = mask; | 121 | larval->mask = mask; |
| 122 | larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; | 122 | larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; |
| 123 | larval->alg.cra_priority = -1; | 123 | larval->alg.cra_priority = -1; |
| 124 | larval->alg.cra_destroy = crypto_larval_destroy; | 124 | larval->alg.cra_destroy = crypto_larval_destroy; |
| 125 | 125 | ||
| 126 | strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); | 126 | strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); |
| 127 | init_completion(&larval->completion); | 127 | init_completion(&larval->completion); |
| 128 | 128 | ||
| 129 | return larval; | 129 | return larval; |
| 130 | } | 130 | } |
| 131 | EXPORT_SYMBOL_GPL(crypto_larval_alloc); | 131 | EXPORT_SYMBOL_GPL(crypto_larval_alloc); |
| 132 | 132 | ||
| 133 | static struct crypto_alg *crypto_larval_add(const char *name, u32 type, | 133 | static struct crypto_alg *crypto_larval_add(const char *name, u32 type, |
| 134 | u32 mask) | 134 | u32 mask) |
| 135 | { | 135 | { |
| 136 | struct crypto_alg *alg; | 136 | struct crypto_alg *alg; |
| 137 | struct crypto_larval *larval; | 137 | struct crypto_larval *larval; |
| 138 | 138 | ||
| 139 | larval = crypto_larval_alloc(name, type, mask); | 139 | larval = crypto_larval_alloc(name, type, mask); |
| 140 | if (IS_ERR(larval)) | 140 | if (IS_ERR(larval)) |
| 141 | return ERR_CAST(larval); | 141 | return ERR_CAST(larval); |
| 142 | 142 | ||
| 143 | atomic_set(&larval->alg.cra_refcnt, 2); | 143 | atomic_set(&larval->alg.cra_refcnt, 2); |
| 144 | 144 | ||
| 145 | down_write(&crypto_alg_sem); | 145 | down_write(&crypto_alg_sem); |
| 146 | alg = __crypto_alg_lookup(name, type, mask); | 146 | alg = __crypto_alg_lookup(name, type, mask); |
| 147 | if (!alg) { | 147 | if (!alg) { |
| 148 | alg = &larval->alg; | 148 | alg = &larval->alg; |
| 149 | list_add(&alg->cra_list, &crypto_alg_list); | 149 | list_add(&alg->cra_list, &crypto_alg_list); |
| 150 | } | 150 | } |
| 151 | up_write(&crypto_alg_sem); | 151 | up_write(&crypto_alg_sem); |
| 152 | 152 | ||
| 153 | if (alg != &larval->alg) | 153 | if (alg != &larval->alg) |
| 154 | kfree(larval); | 154 | kfree(larval); |
| 155 | 155 | ||
| 156 | return alg; | 156 | return alg; |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | void crypto_larval_kill(struct crypto_alg *alg) | 159 | void crypto_larval_kill(struct crypto_alg *alg) |
| 160 | { | 160 | { |
| 161 | struct crypto_larval *larval = (void *)alg; | 161 | struct crypto_larval *larval = (void *)alg; |
| 162 | 162 | ||
| 163 | down_write(&crypto_alg_sem); | 163 | down_write(&crypto_alg_sem); |
| 164 | list_del(&alg->cra_list); | 164 | list_del(&alg->cra_list); |
| 165 | up_write(&crypto_alg_sem); | 165 | up_write(&crypto_alg_sem); |
| 166 | complete_all(&larval->completion); | 166 | complete_all(&larval->completion); |
| 167 | crypto_alg_put(alg); | 167 | crypto_alg_put(alg); |
| 168 | } | 168 | } |
| 169 | EXPORT_SYMBOL_GPL(crypto_larval_kill); | 169 | EXPORT_SYMBOL_GPL(crypto_larval_kill); |
| 170 | 170 | ||
| 171 | static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) | 171 | static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) |
| 172 | { | 172 | { |
| 173 | struct crypto_larval *larval = (void *)alg; | 173 | struct crypto_larval *larval = (void *)alg; |
| 174 | long timeout; | 174 | long timeout; |
| 175 | 175 | ||
| 176 | timeout = wait_for_completion_interruptible_timeout( | 176 | timeout = wait_for_completion_interruptible_timeout( |
| 177 | &larval->completion, 60 * HZ); | 177 | &larval->completion, 60 * HZ); |
| 178 | 178 | ||
| 179 | alg = larval->adult; | 179 | alg = larval->adult; |
| 180 | if (timeout < 0) | 180 | if (timeout < 0) |
| 181 | alg = ERR_PTR(-EINTR); | 181 | alg = ERR_PTR(-EINTR); |
| 182 | else if (!timeout) | 182 | else if (!timeout) |
| 183 | alg = ERR_PTR(-ETIMEDOUT); | 183 | alg = ERR_PTR(-ETIMEDOUT); |
| 184 | else if (!alg) | 184 | else if (!alg) |
| 185 | alg = ERR_PTR(-ENOENT); | 185 | alg = ERR_PTR(-ENOENT); |
| 186 | else if (crypto_is_test_larval(larval) && | 186 | else if (crypto_is_test_larval(larval) && |
| 187 | !(alg->cra_flags & CRYPTO_ALG_TESTED)) | 187 | !(alg->cra_flags & CRYPTO_ALG_TESTED)) |
| 188 | alg = ERR_PTR(-EAGAIN); | 188 | alg = ERR_PTR(-EAGAIN); |
| 189 | else if (!crypto_mod_get(alg)) | 189 | else if (!crypto_mod_get(alg)) |
| 190 | alg = ERR_PTR(-EAGAIN); | 190 | alg = ERR_PTR(-EAGAIN); |
| 191 | crypto_mod_put(&larval->alg); | 191 | crypto_mod_put(&larval->alg); |
| 192 | 192 | ||
| 193 | return alg; | 193 | return alg; |
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) | 196 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) |
| 197 | { | 197 | { |
| 198 | struct crypto_alg *alg; | 198 | struct crypto_alg *alg; |
| 199 | 199 | ||
| 200 | down_read(&crypto_alg_sem); | 200 | down_read(&crypto_alg_sem); |
| 201 | alg = __crypto_alg_lookup(name, type, mask); | 201 | alg = __crypto_alg_lookup(name, type, mask); |
| 202 | up_read(&crypto_alg_sem); | 202 | up_read(&crypto_alg_sem); |
| 203 | 203 | ||
| 204 | return alg; | 204 | return alg; |
| 205 | } | 205 | } |
| 206 | EXPORT_SYMBOL_GPL(crypto_alg_lookup); | 206 | EXPORT_SYMBOL_GPL(crypto_alg_lookup); |
| 207 | 207 | ||
| 208 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) | 208 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) |
| 209 | { | 209 | { |
| 210 | struct crypto_alg *alg; | 210 | struct crypto_alg *alg; |
| 211 | 211 | ||
| 212 | if (!name) | 212 | if (!name) |
| 213 | return ERR_PTR(-ENOENT); | 213 | return ERR_PTR(-ENOENT); |
| 214 | 214 | ||
| 215 | mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); | 215 | mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); |
| 216 | type &= mask; | 216 | type &= mask; |
| 217 | 217 | ||
| 218 | alg = try_then_request_module(crypto_alg_lookup(name, type, mask), | 218 | alg = crypto_alg_lookup(name, type, mask); |
| 219 | name); | 219 | if (!alg) { |
| | | 220 | char tmp[CRYPTO_MAX_ALG_NAME]; |
| | | 221 | |
| | | 222 | request_module(name); |
| | | 223 | |
| | | 224 | if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) && |
| | | 225 | snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp)) |
| | | 226 | request_module(tmp); |
| | | 227 | |
| | | 228 | alg = crypto_alg_lookup(name, type, mask); |
| | | 229 | } |
| | | 230 | |
| 220 | if (alg) | 231 | if (alg) |
| 221 | return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; | 232 | return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; |
| 222 | 233 | ||
| 223 | return crypto_larval_add(name, type, mask); | 234 | return crypto_larval_add(name, type, mask); |
| 224 | } | 235 | } |
| 225 | EXPORT_SYMBOL_GPL(crypto_larval_lookup); | 236 | EXPORT_SYMBOL_GPL(crypto_larval_lookup); |
| 226 | 237 | ||
| 227 | int crypto_probing_notify(unsigned long val, void *v) | 238 | int crypto_probing_notify(unsigned long val, void *v) |
| 228 | { | 239 | { |
| 229 | int ok; | 240 | int ok; |
| 230 | 241 | ||
| 231 | ok = blocking_notifier_call_chain(&crypto_chain, val, v); | 242 | ok = blocking_notifier_call_chain(&crypto_chain, val, v); |
| 232 | if (ok == NOTIFY_DONE) { | 243 | if (ok == NOTIFY_DONE) { |
| 233 | request_module("cryptomgr"); | 244 | request_module("cryptomgr"); |
| 234 | ok = blocking_notifier_call_chain(&crypto_chain, val, v); | 245 | ok = blocking_notifier_call_chain(&crypto_chain, val, v); |
| 235 | } | 246 | } |
| 236 | 247 | ||
| 237 | return ok; | 248 | return ok; |
| 238 | } | 249 | } |
| 239 | EXPORT_SYMBOL_GPL(crypto_probing_notify); | 250 | EXPORT_SYMBOL_GPL(crypto_probing_notify); |
| 240 | 251 | ||
| 241 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) | 252 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) |
| 242 | { | 253 | { |
| 243 | struct crypto_alg *alg; | 254 | struct crypto_alg *alg; |
| 244 | struct crypto_alg *larval; | 255 | struct crypto_alg *larval; |
| 245 | int ok; | 256 | int ok; |
| 246 | 257 | ||
| 247 | if (!(mask & CRYPTO_ALG_TESTED)) { | 258 | if (!(mask & CRYPTO_ALG_TESTED)) { |
| 248 | type |= CRYPTO_ALG_TESTED; | 259 | type |= CRYPTO_ALG_TESTED; |
| 249 | mask |= CRYPTO_ALG_TESTED; | 260 | mask |= CRYPTO_ALG_TESTED; |
| 250 | } | 261 | } |
| 251 | 262 | ||
| 252 | larval = crypto_larval_lookup(name, type, mask); | 263 | larval = crypto_larval_lookup(name, type, mask); |
| 253 | if (IS_ERR(larval) || !crypto_is_larval(larval)) | 264 | if (IS_ERR(larval) || !crypto_is_larval(larval)) |
| 254 | return larval; | 265 | return larval; |
| 255 | 266 | ||
| 256 | ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); | 267 | ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); |
| 257 | 268 | ||
| 258 | if (ok == NOTIFY_STOP) | 269 | if (ok == NOTIFY_STOP) |
| 259 | alg = crypto_larval_wait(larval); | 270 | alg = crypto_larval_wait(larval); |
| 260 | else { | 271 | else { |
| 261 | crypto_mod_put(larval); | 272 | crypto_mod_put(larval); |
| 262 | alg = ERR_PTR(-ENOENT); | 273 | alg = ERR_PTR(-ENOENT); |
| 263 | } | 274 | } |
| 264 | crypto_larval_kill(larval); | 275 | crypto_larval_kill(larval); |
| 265 | return alg; | 276 | return alg; |
| 266 | } | 277 | } |
| 267 | EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); | 278 | EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); |
| 268 | 279 | ||
| 269 | static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | 280 | static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) |
| 270 | { | 281 | { |
| 271 | const struct crypto_type *type_obj = tfm->__crt_alg->cra_type; | 282 | const struct crypto_type *type_obj = tfm->__crt_alg->cra_type; |
| 272 | 283 | ||
| 273 | if (type_obj) | 284 | if (type_obj) |
| 274 | return type_obj->init(tfm, type, mask); | 285 | return type_obj->init(tfm, type, mask); |
| 275 | 286 | ||
| 276 | switch (crypto_tfm_alg_type(tfm)) { | 287 | switch (crypto_tfm_alg_type(tfm)) { |
| 277 | case CRYPTO_ALG_TYPE_CIPHER: | 288 | case CRYPTO_ALG_TYPE_CIPHER: |
| 278 | return crypto_init_cipher_ops(tfm); | 289 | return crypto_init_cipher_ops(tfm); |
| 279 | 290 | ||
| 280 | case CRYPTO_ALG_TYPE_DIGEST: | 291 | case CRYPTO_ALG_TYPE_DIGEST: |
| 281 | if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != | 292 | if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != |
| 282 | CRYPTO_ALG_TYPE_HASH_MASK) | 293 | CRYPTO_ALG_TYPE_HASH_MASK) |
| 283 | return crypto_init_digest_ops_async(tfm); | 294 | return crypto_init_digest_ops_async(tfm); |
| 284 | else | 295 | else |
| 285 | return crypto_init_digest_ops(tfm); | 296 | return crypto_init_digest_ops(tfm); |
| 286 | 297 | ||
| 287 | case CRYPTO_ALG_TYPE_COMPRESS: | 298 | case CRYPTO_ALG_TYPE_COMPRESS: |
| 288 | return crypto_init_compress_ops(tfm); | 299 | return crypto_init_compress_ops(tfm); |
| 289 | 300 | ||
| 290 | default: | 301 | default: |
| 291 | break; | 302 | break; |
| 292 | } | 303 | } |
| 293 | 304 | ||
| 294 | BUG(); | 305 | BUG(); |
| 295 | return -EINVAL; | 306 | return -EINVAL; |
| 296 | } | 307 | } |
| 297 | 308 | ||
| 298 | static void crypto_exit_ops(struct crypto_tfm *tfm) | 309 | static void crypto_exit_ops(struct crypto_tfm *tfm) |
| 299 | { | 310 | { |
| 300 | const struct crypto_type *type = tfm->__crt_alg->cra_type; | 311 | const struct crypto_type *type = tfm->__crt_alg->cra_type; |
| 301 | 312 | ||
| 302 | if (type) { | 313 | if (type) { |
| 303 | if (tfm->exit) | 314 | if (tfm->exit) |
| 304 | tfm->exit(tfm); | 315 | tfm->exit(tfm); |
| 305 | return; | 316 | return; |
| 306 | } | 317 | } |
| 307 | 318 | ||
| 308 | switch (crypto_tfm_alg_type(tfm)) { | 319 | switch (crypto_tfm_alg_type(tfm)) { |
| 309 | case CRYPTO_ALG_TYPE_CIPHER: | 320 | case CRYPTO_ALG_TYPE_CIPHER: |
| 310 | crypto_exit_cipher_ops(tfm); | 321 | crypto_exit_cipher_ops(tfm); |
| 311 | break; | 322 | break; |
| 312 | 323 | ||
| 313 | case CRYPTO_ALG_TYPE_DIGEST: | 324 | case CRYPTO_ALG_TYPE_DIGEST: |
| 314 | crypto_exit_digest_ops(tfm); | 325 | crypto_exit_digest_ops(tfm); |
| 315 | break; | 326 | break; |
| 316 | 327 | ||
| 317 | case CRYPTO_ALG_TYPE_COMPRESS: | 328 | case CRYPTO_ALG_TYPE_COMPRESS: |
| 318 | crypto_exit_compress_ops(tfm); | 329 | crypto_exit_compress_ops(tfm); |
| 319 | break; | 330 | break; |
| 320 | 331 | ||
| 321 | default: | 332 | default: |
| 322 | BUG(); | 333 | BUG(); |
| 323 | 334 | ||
| 324 | } | 335 | } |
| 325 | } | 336 | } |
| 326 | 337 | ||
| 327 | static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) | 338 | static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) |
| 328 | { | 339 | { |
| 329 | const struct crypto_type *type_obj = alg->cra_type; | 340 | const struct crypto_type *type_obj = alg->cra_type; |
| 330 | unsigned int len; | 341 | unsigned int len; |
| 331 | 342 | ||
| 332 | len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1); | 343 | len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1); |
| 333 | if (type_obj) | 344 | if (type_obj) |
| 334 | return len + type_obj->ctxsize(alg, type, mask); | 345 | return len + type_obj->ctxsize(alg, type, mask); |
| 335 | 346 | ||
| 336 | switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 347 | switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
| 337 | default: | 348 | default: |
| 338 | BUG(); | 349 | BUG(); |
| 339 | 350 | ||
| 340 | case CRYPTO_ALG_TYPE_CIPHER: | 351 | case CRYPTO_ALG_TYPE_CIPHER: |
| 341 | len += crypto_cipher_ctxsize(alg); | 352 | len += crypto_cipher_ctxsize(alg); |
| 342 | break; | 353 | break; |
| 343 | 354 | ||
| 344 | case CRYPTO_ALG_TYPE_DIGEST: | 355 | case CRYPTO_ALG_TYPE_DIGEST: |
| 345 | len += crypto_digest_ctxsize(alg); | 356 | len += crypto_digest_ctxsize(alg); |
| 346 | break; | 357 | break; |
| 347 | 358 | ||
| 348 | case CRYPTO_ALG_TYPE_COMPRESS: | 359 | case CRYPTO_ALG_TYPE_COMPRESS: |
| 349 | len += crypto_compress_ctxsize(alg); | 360 | len += crypto_compress_ctxsize(alg); |
| 350 | break; | 361 | break; |
| 351 | } | 362 | } |
| 352 | 363 | ||
| 353 | return len; | 364 | return len; |
| 354 | } | 365 | } |
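The arithmetic above is terse, so here is a worked example with illustrative numbers (assuming an 8-byte base alignment from crypto_tfm_ctx_alignment() and a 16-byte alignmask such as PadLock's; neither value is taken from this diff):

    /*
     * cra_alignmask = 15 (PADLOCK_ALIGNMENT - 1), base alignment = 8:
     *
     *     len = 15 & ~(8 - 1) = 8
     *
     * i.e. 8 spare bytes are added to the context size so a driver can
     * realign its context to a 16-byte boundary at runtime, as
     * aes_ctx_common() in padlock-aes.c (further down) does.
     */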
| 355 | 366 | ||
| 356 | void crypto_shoot_alg(struct crypto_alg *alg) | 367 | void crypto_shoot_alg(struct crypto_alg *alg) |
| 357 | { | 368 | { |
| 358 | down_write(&crypto_alg_sem); | 369 | down_write(&crypto_alg_sem); |
| 359 | alg->cra_flags |= CRYPTO_ALG_DYING; | 370 | alg->cra_flags |= CRYPTO_ALG_DYING; |
| 360 | up_write(&crypto_alg_sem); | 371 | up_write(&crypto_alg_sem); |
| 361 | } | 372 | } |
| 362 | EXPORT_SYMBOL_GPL(crypto_shoot_alg); | 373 | EXPORT_SYMBOL_GPL(crypto_shoot_alg); |
| 363 | 374 | ||
| 364 | struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, | 375 | struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, |
| 365 | u32 mask) | 376 | u32 mask) |
| 366 | { | 377 | { |
| 367 | struct crypto_tfm *tfm = NULL; | 378 | struct crypto_tfm *tfm = NULL; |
| 368 | unsigned int tfm_size; | 379 | unsigned int tfm_size; |
| 369 | int err = -ENOMEM; | 380 | int err = -ENOMEM; |
| 370 | 381 | ||
| 371 | tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask); | 382 | tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask); |
| 372 | tfm = kzalloc(tfm_size, GFP_KERNEL); | 383 | tfm = kzalloc(tfm_size, GFP_KERNEL); |
| 373 | if (tfm == NULL) | 384 | if (tfm == NULL) |
| 374 | goto out_err; | 385 | goto out_err; |
| 375 | 386 | ||
| 376 | tfm->__crt_alg = alg; | 387 | tfm->__crt_alg = alg; |
| 377 | 388 | ||
| 378 | err = crypto_init_ops(tfm, type, mask); | 389 | err = crypto_init_ops(tfm, type, mask); |
| 379 | if (err) | 390 | if (err) |
| 380 | goto out_free_tfm; | 391 | goto out_free_tfm; |
| 381 | 392 | ||
| 382 | if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) | 393 | if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) |
| 383 | goto cra_init_failed; | 394 | goto cra_init_failed; |
| 384 | 395 | ||
| 385 | goto out; | 396 | goto out; |
| 386 | 397 | ||
| 387 | cra_init_failed: | 398 | cra_init_failed: |
| 388 | crypto_exit_ops(tfm); | 399 | crypto_exit_ops(tfm); |
| 389 | out_free_tfm: | 400 | out_free_tfm: |
| 390 | if (err == -EAGAIN) | 401 | if (err == -EAGAIN) |
| 391 | crypto_shoot_alg(alg); | 402 | crypto_shoot_alg(alg); |
| 392 | kfree(tfm); | 403 | kfree(tfm); |
| 393 | out_err: | 404 | out_err: |
| 394 | tfm = ERR_PTR(err); | 405 | tfm = ERR_PTR(err); |
| 395 | out: | 406 | out: |
| 396 | return tfm; | 407 | return tfm; |
| 397 | } | 408 | } |
| 398 | EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); | 409 | EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); |
| 399 | 410 | ||
| 400 | /* | 411 | /* |
| 401 | * crypto_alloc_base - Locate algorithm and allocate transform | 412 | * crypto_alloc_base - Locate algorithm and allocate transform |
| 402 | * @alg_name: Name of algorithm | 413 | * @alg_name: Name of algorithm |
| 403 | * @type: Type of algorithm | 414 | * @type: Type of algorithm |
| 404 | * @mask: Mask for type comparison | 415 | * @mask: Mask for type comparison |
| 405 | * | 416 | * |
| 406 | * This function should not be used by new algorithm types. | 417 | * This function should not be used by new algorithm types. |
| 407 | * Please use crypto_alloc_tfm instead. | 418 | * Please use crypto_alloc_tfm instead. |
| 408 | * | 419 | * |
| 409 | * crypto_alloc_base() will first attempt to locate an already loaded | 420 | * crypto_alloc_base() will first attempt to locate an already loaded |
| 410 | * algorithm. If that fails and the kernel supports dynamically loadable | 421 | * algorithm. If that fails and the kernel supports dynamically loadable |
| 411 | * modules, it will then attempt to load a module of the same name or | 422 | * modules, it will then attempt to load a module of the same name or |
| 412 | * alias. If that fails it will send a query to any loaded crypto manager | 423 | * alias. If that fails it will send a query to any loaded crypto manager |
| 413 | * to construct an algorithm on the fly. A refcount is grabbed on the | 424 | * to construct an algorithm on the fly. A refcount is grabbed on the |
| 414 | * algorithm which is then associated with the new transform. | 425 | * algorithm which is then associated with the new transform. |
| 415 | * | 426 | * |
| 416 | * The returned transform is of a non-determinate type. Most people | 427 | * The returned transform is of a non-determinate type. Most people |
| 417 | * should use one of the more specific allocation functions such as | 428 | * should use one of the more specific allocation functions such as |
| 418 | * crypto_alloc_blkcipher. | 429 | * crypto_alloc_blkcipher. |
| 419 | * | 430 | * |
| 420 | * In case of error the return value is an error pointer. | 431 | * In case of error the return value is an error pointer. |
| 421 | */ | 432 | */ |
| 422 | struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask) | 433 | struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask) |
| 423 | { | 434 | { |
| 424 | struct crypto_tfm *tfm; | 435 | struct crypto_tfm *tfm; |
| 425 | int err; | 436 | int err; |
| 426 | 437 | ||
| 427 | for (;;) { | 438 | for (;;) { |
| 428 | struct crypto_alg *alg; | 439 | struct crypto_alg *alg; |
| 429 | 440 | ||
| 430 | alg = crypto_alg_mod_lookup(alg_name, type, mask); | 441 | alg = crypto_alg_mod_lookup(alg_name, type, mask); |
| 431 | if (IS_ERR(alg)) { | 442 | if (IS_ERR(alg)) { |
| 432 | err = PTR_ERR(alg); | 443 | err = PTR_ERR(alg); |
| 433 | goto err; | 444 | goto err; |
| 434 | } | 445 | } |
| 435 | 446 | ||
| 436 | tfm = __crypto_alloc_tfm(alg, type, mask); | 447 | tfm = __crypto_alloc_tfm(alg, type, mask); |
| 437 | if (!IS_ERR(tfm)) | 448 | if (!IS_ERR(tfm)) |
| 438 | return tfm; | 449 | return tfm; |
| 439 | 450 | ||
| 440 | crypto_mod_put(alg); | 451 | crypto_mod_put(alg); |
| 441 | err = PTR_ERR(tfm); | 452 | err = PTR_ERR(tfm); |
| 442 | 453 | ||
| 443 | err: | 454 | err: |
| 444 | if (err != -EAGAIN) | 455 | if (err != -EAGAIN) |
| 445 | break; | 456 | break; |
| 446 | if (signal_pending(current)) { | 457 | if (signal_pending(current)) { |
| 447 | err = -EINTR; | 458 | err = -EINTR; |
| 448 | break; | 459 | break; |
| 449 | } | 460 | } |
| 450 | } | 461 | } |
| 451 | 462 | ||
| 452 | return ERR_PTR(err); | 463 | return ERR_PTR(err); |
| 453 | } | 464 | } |
| 454 | EXPORT_SYMBOL_GPL(crypto_alloc_base); | 465 | EXPORT_SYMBOL_GPL(crypto_alloc_base); |
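For orientation, a minimal caller of this legacy interface could look like the sketch below; it is illustrative only and not part of the diff:

    /* Illustrative caller sketch (not from this diff). */
    struct crypto_tfm *tfm;

    tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
                            CRYPTO_ALG_TYPE_MASK);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    /* ... use the transform ... */

    crypto_free_tfm(tfm);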
| 455 | 466 | ||
| 456 | struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg, | 467 | struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg, |
| 457 | const struct crypto_type *frontend) | 468 | const struct crypto_type *frontend) |
| 458 | { | 469 | { |
| 459 | char *mem; | 470 | char *mem; |
| 460 | struct crypto_tfm *tfm = NULL; | 471 | struct crypto_tfm *tfm = NULL; |
| 461 | unsigned int tfmsize; | 472 | unsigned int tfmsize; |
| 462 | unsigned int total; | 473 | unsigned int total; |
| 463 | int err = -ENOMEM; | 474 | int err = -ENOMEM; |
| 464 | 475 | ||
| 465 | tfmsize = frontend->tfmsize; | 476 | tfmsize = frontend->tfmsize; |
| 466 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); | 477 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); |
| 467 | 478 | ||
| 468 | mem = kzalloc(total, GFP_KERNEL); | 479 | mem = kzalloc(total, GFP_KERNEL); |
| 469 | if (mem == NULL) | 480 | if (mem == NULL) |
| 470 | goto out_err; | 481 | goto out_err; |
| 471 | 482 | ||
| 472 | tfm = (struct crypto_tfm *)(mem + tfmsize); | 483 | tfm = (struct crypto_tfm *)(mem + tfmsize); |
| 473 | tfm->__crt_alg = alg; | 484 | tfm->__crt_alg = alg; |
| 474 | 485 | ||
| 475 | err = frontend->init_tfm(tfm, frontend); | 486 | err = frontend->init_tfm(tfm, frontend); |
| 476 | if (err) | 487 | if (err) |
| 477 | goto out_free_tfm; | 488 | goto out_free_tfm; |
| 478 | 489 | ||
| 479 | if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) | 490 | if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) |
| 480 | goto cra_init_failed; | 491 | goto cra_init_failed; |
| 481 | 492 | ||
| 482 | goto out; | 493 | goto out; |
| 483 | 494 | ||
| 484 | cra_init_failed: | 495 | cra_init_failed: |
| 485 | crypto_exit_ops(tfm); | 496 | crypto_exit_ops(tfm); |
| 486 | out_free_tfm: | 497 | out_free_tfm: |
| 487 | if (err == -EAGAIN) | 498 | if (err == -EAGAIN) |
| 488 | crypto_shoot_alg(alg); | 499 | crypto_shoot_alg(alg); |
| 489 | kfree(mem); | 500 | kfree(mem); |
| 490 | out_err: | 501 | out_err: |
| 491 | tfm = ERR_PTR(err); | 502 | tfm = ERR_PTR(err); |
| 492 | out: | 503 | out: |
| 493 | return tfm; | 504 | return tfm; |
| 494 | } | 505 | } |
| 495 | EXPORT_SYMBOL_GPL(crypto_create_tfm); | 506 | EXPORT_SYMBOL_GPL(crypto_create_tfm); |
| 496 | 507 | ||
| 497 | /* | 508 | /* |
| 498 | * crypto_alloc_tfm - Locate algorithm and allocate transform | 509 | * crypto_alloc_tfm - Locate algorithm and allocate transform |
| 499 | * @alg_name: Name of algorithm | 510 | * @alg_name: Name of algorithm |
| 500 | * @frontend: Frontend algorithm type | 511 | * @frontend: Frontend algorithm type |
| 501 | * @type: Type of algorithm | 512 | * @type: Type of algorithm |
| 502 | * @mask: Mask for type comparison | 513 | * @mask: Mask for type comparison |
| 503 | * | 514 | * |
| 504 | * crypto_alloc_tfm() will first attempt to locate an already loaded | 515 | * crypto_alloc_tfm() will first attempt to locate an already loaded |
| 505 | * algorithm. If that fails and the kernel supports dynamically loadable | 516 | * algorithm. If that fails and the kernel supports dynamically loadable |
| 506 | * modules, it will then attempt to load a module of the same name or | 517 | * modules, it will then attempt to load a module of the same name or |
| 507 | * alias. If that fails it will send a query to any loaded crypto manager | 518 | * alias. If that fails it will send a query to any loaded crypto manager |
| 508 | * to construct an algorithm on the fly. A refcount is grabbed on the | 519 | * to construct an algorithm on the fly. A refcount is grabbed on the |
| 509 | * algorithm which is then associated with the new transform. | 520 | * algorithm which is then associated with the new transform. |
| 510 | * | 521 | * |
| 511 | * The returned transform is of a non-determinate type. Most people | 522 | * The returned transform is of a non-determinate type. Most people |
| 512 | * should use one of the more specific allocation functions such as | 523 | * should use one of the more specific allocation functions such as |
| 513 | * crypto_alloc_blkcipher. | 524 | * crypto_alloc_blkcipher. |
| 514 | * | 525 | * |
| 515 | * In case of error the return value is an error pointer. | 526 | * In case of error the return value is an error pointer. |
| 516 | */ | 527 | */ |
| 517 | struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, | 528 | struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, |
| 518 | const struct crypto_type *frontend, | 529 | const struct crypto_type *frontend, |
| 519 | u32 type, u32 mask) | 530 | u32 type, u32 mask) |
| 520 | { | 531 | { |
| 521 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); | 532 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); |
| 522 | struct crypto_tfm *tfm; | 533 | struct crypto_tfm *tfm; |
| 523 | int err; | 534 | int err; |
| 524 | 535 | ||
| 525 | type &= frontend->maskclear; | 536 | type &= frontend->maskclear; |
| 526 | mask &= frontend->maskclear; | 537 | mask &= frontend->maskclear; |
| 527 | type |= frontend->type; | 538 | type |= frontend->type; |
| 528 | mask |= frontend->maskset; | 539 | mask |= frontend->maskset; |
| 529 | 540 | ||
| 530 | lookup = frontend->lookup ?: crypto_alg_mod_lookup; | 541 | lookup = frontend->lookup ?: crypto_alg_mod_lookup; |
| 531 | 542 | ||
| 532 | for (;;) { | 543 | for (;;) { |
| 533 | struct crypto_alg *alg; | 544 | struct crypto_alg *alg; |
| 534 | 545 | ||
| 535 | alg = lookup(alg_name, type, mask); | 546 | alg = lookup(alg_name, type, mask); |
| 536 | if (IS_ERR(alg)) { | 547 | if (IS_ERR(alg)) { |
| 537 | err = PTR_ERR(alg); | 548 | err = PTR_ERR(alg); |
| 538 | goto err; | 549 | goto err; |
| 539 | } | 550 | } |
| 540 | 551 | ||
| 541 | tfm = crypto_create_tfm(alg, frontend); | 552 | tfm = crypto_create_tfm(alg, frontend); |
| 542 | if (!IS_ERR(tfm)) | 553 | if (!IS_ERR(tfm)) |
| 543 | return tfm; | 554 | return tfm; |
| 544 | 555 | ||
| 545 | crypto_mod_put(alg); | 556 | crypto_mod_put(alg); |
| 546 | err = PTR_ERR(tfm); | 557 | err = PTR_ERR(tfm); |
| 547 | 558 | ||
| 548 | err: | 559 | err: |
| 549 | if (err != -EAGAIN) | 560 | if (err != -EAGAIN) |
| 550 | break; | 561 | break; |
| 551 | if (signal_pending(current)) { | 562 | if (signal_pending(current)) { |
| 552 | err = -EINTR; | 563 | err = -EINTR; |
| 553 | break; | 564 | break; |
| 554 | } | 565 | } |
| 555 | } | 566 | } |
| 556 | 567 | ||
| 557 | return ERR_PTR(err); | 568 | return ERR_PTR(err); |
| 558 | } | 569 | } |
| 559 | EXPORT_SYMBOL_GPL(crypto_alloc_tfm); | 570 | EXPORT_SYMBOL_GPL(crypto_alloc_tfm); |
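The frontend-based allocator above is what the type-specific helpers are built on. A hedged sketch of such a wrapper follows; "crypto_foo" and "crypto_foo_type" are invented names, not symbols from this diff:

    /* Hypothetical wrapper, for illustration only. */
    static inline struct crypto_foo *crypto_alloc_foo(const char *alg_name,
                                                      u32 type, u32 mask)
    {
            struct crypto_tfm *tfm;

            tfm = crypto_alloc_tfm(alg_name, &crypto_foo_type, type, mask);
            if (IS_ERR(tfm))
                    return ERR_CAST(tfm);

            return container_of(tfm, struct crypto_foo, base);
    }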
| 560 | 571 | ||
| 561 | /* | 572 | /* |
| 562 | * crypto_destroy_tfm - Free crypto transform | 573 | * crypto_destroy_tfm - Free crypto transform |
| 563 | * @mem: Start of tfm slab | 574 | * @mem: Start of tfm slab |
| 564 | * @tfm: Transform to free | 575 | * @tfm: Transform to free |
| 565 | * | 576 | * |
| 566 | * This function frees up the transform and any associated resources, | 577 | * This function frees up the transform and any associated resources, |
| 567 | * then drops the refcount on the associated algorithm. | 578 | * then drops the refcount on the associated algorithm. |
| 568 | */ | 579 | */ |
| 569 | void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) | 580 | void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) |
| 570 | { | 581 | { |
| 571 | struct crypto_alg *alg; | 582 | struct crypto_alg *alg; |
| 572 | int size; | 583 | int size; |
| 573 | 584 | ||
| 574 | if (unlikely(!mem)) | 585 | if (unlikely(!mem)) |
| 575 | return; | 586 | return; |
| 576 | 587 | ||
| 577 | alg = tfm->__crt_alg; | 588 | alg = tfm->__crt_alg; |
| 578 | size = ksize(mem); | 589 | size = ksize(mem); |
| 579 | 590 | ||
| 580 | if (!tfm->exit && alg->cra_exit) | 591 | if (!tfm->exit && alg->cra_exit) |
| 581 | alg->cra_exit(tfm); | 592 | alg->cra_exit(tfm); |
| 582 | crypto_exit_ops(tfm); | 593 | crypto_exit_ops(tfm); |
| 583 | crypto_mod_put(alg); | 594 | crypto_mod_put(alg); |
| 584 | memset(mem, 0, size); | 595 | memset(mem, 0, size); |
| 585 | kfree(mem); | 596 | kfree(mem); |
| 586 | } | 597 | } |
| 587 | EXPORT_SYMBOL_GPL(crypto_destroy_tfm); | 598 | EXPORT_SYMBOL_GPL(crypto_destroy_tfm); |
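The public free helpers are thin wrappers over crypto_destroy_tfm(); a simplified sketch of one such wrapper (the real definitions live in include/linux/crypto.h):

    /* Simplified sketch of a wrapper over crypto_destroy_tfm(). */
    static inline void crypto_free_tfm(struct crypto_tfm *tfm)
    {
            crypto_destroy_tfm(tfm, tfm);
    }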
| 588 | 599 | ||
| 589 | int crypto_has_alg(const char *name, u32 type, u32 mask) | 600 | int crypto_has_alg(const char *name, u32 type, u32 mask) |
| 590 | { | 601 | { |
| 591 | int ret = 0; | 602 | int ret = 0; |
| 592 | struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); | 603 | struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); |
| 593 | 604 | ||
| 594 | if (!IS_ERR(alg)) { | 605 | if (!IS_ERR(alg)) { |
| 595 | crypto_mod_put(alg); | 606 | crypto_mod_put(alg); |
| 596 | ret = 1; | 607 | ret = 1; |
| 597 | } | 608 | } |
| 598 | 609 | ||
| 599 | return ret; | 610 | return ret; |
| 600 | } | 611 | } |
| 601 | EXPORT_SYMBOL_GPL(crypto_has_alg); | 612 | EXPORT_SYMBOL_GPL(crypto_has_alg); |
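A typical, purely illustrative use of crypto_has_alg() is a capability probe before committing to a configuration:

    /* Illustrative only: bail out early if no "sha1" provider
     * can be found or loaded. */
    if (!crypto_has_alg("sha1", 0, 0))
            return -ENOENT;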
| 602 | 613 | ||
| 603 | MODULE_DESCRIPTION("Cryptographic core API"); | 614 | MODULE_DESCRIPTION("Cryptographic core API"); |
| 604 | MODULE_LICENSE("GPL"); | 615 | MODULE_LICENSE("GPL"); |
| 605 | 616 |
drivers/crypto/padlock-aes.c
| 1 | /* | 1 | /* |
| 2 | * Cryptographic API. | 2 | * Cryptographic API. |
| 3 | * | 3 | * |
| 4 | * Support for VIA PadLock hardware crypto engine. | 4 | * Support for VIA PadLock hardware crypto engine. |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> | 6 | * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> |
| 7 | * | 7 | * |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <crypto/algapi.h> | 10 | #include <crypto/algapi.h> |
| 11 | #include <crypto/aes.h> | 11 | #include <crypto/aes.h> |
| 12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
| 16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
| 19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
| 20 | #include <asm/byteorder.h> | 20 | #include <asm/byteorder.h> |
| 21 | #include <asm/i387.h> | 21 | #include <asm/i387.h> |
| 22 | #include "padlock.h" | 22 | #include "padlock.h" |
| 23 | 23 | ||
| 24 | /* Control word. */ | 24 | /* Control word. */ |
| 25 | struct cword { | 25 | struct cword { |
| 26 | unsigned int __attribute__ ((__packed__)) | 26 | unsigned int __attribute__ ((__packed__)) |
| 27 | rounds:4, | 27 | rounds:4, |
| 28 | algo:3, | 28 | algo:3, |
| 29 | keygen:1, | 29 | keygen:1, |
| 30 | interm:1, | 30 | interm:1, |
| 31 | encdec:1, | 31 | encdec:1, |
| 32 | ksize:2; | 32 | ksize:2; |
| 33 | } __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); | 33 | } __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); |
| 34 | 34 | ||
| 35 | /* Whenever making any changes to the following | 35 | /* Whenever making any changes to the following |
| 36 | * structure *make sure* you keep E, d_data | 36 | * structure *make sure* you keep E, d_data |
| 37 | * and cword aligned on 16 Bytes boundaries and | 37 | * and cword aligned on 16 Bytes boundaries and |
| 38 | * the Hardware can access 16 * 16 bytes of E and d_data | 38 | * the Hardware can access 16 * 16 bytes of E and d_data |
| 39 | * (only the first 15 * 16 bytes matter but the HW reads | 39 | * (only the first 15 * 16 bytes matter but the HW reads |
| 40 | * more). | 40 | * more). |
| 41 | */ | 41 | */ |
| 42 | struct aes_ctx { | 42 | struct aes_ctx { |
| 43 | u32 E[AES_MAX_KEYLENGTH_U32] | 43 | u32 E[AES_MAX_KEYLENGTH_U32] |
| 44 | __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); | 44 | __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); |
| 45 | u32 d_data[AES_MAX_KEYLENGTH_U32] | 45 | u32 d_data[AES_MAX_KEYLENGTH_U32] |
| 46 | __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); | 46 | __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); |
| 47 | struct { | 47 | struct { |
| 48 | struct cword encrypt; | 48 | struct cword encrypt; |
| 49 | struct cword decrypt; | 49 | struct cword decrypt; |
| 50 | } cword; | 50 | } cword; |
| 51 | u32 *D; | 51 | u32 *D; |
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | static DEFINE_PER_CPU(struct cword *, last_cword); | 54 | static DEFINE_PER_CPU(struct cword *, last_cword); |
| 55 | 55 | ||
| 56 | /* Tells whether the ACE is capable of generating | 56 | /* Tells whether the ACE is capable of generating |
| 57 | the extended key for a given key_len. */ | 57 | the extended key for a given key_len. */ |
| 58 | static inline int | 58 | static inline int |
| 59 | aes_hw_extkey_available(uint8_t key_len) | 59 | aes_hw_extkey_available(uint8_t key_len) |
| 60 | { | 60 | { |
| 61 | /* TODO: We should check the actual CPU model/stepping | 61 | /* TODO: We should check the actual CPU model/stepping |
| 62 | as it's possible that the capability will be | 62 | as it's possible that the capability will be |
| 63 | added in the next CPU revisions. */ | 63 | added in the next CPU revisions. */ |
| 64 | if (key_len == 16) | 64 | if (key_len == 16) |
| 65 | return 1; | 65 | return 1; |
| 66 | return 0; | 66 | return 0; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static inline struct aes_ctx *aes_ctx_common(void *ctx) | 69 | static inline struct aes_ctx *aes_ctx_common(void *ctx) |
| 70 | { | 70 | { |
| 71 | unsigned long addr = (unsigned long)ctx; | 71 | unsigned long addr = (unsigned long)ctx; |
| 72 | unsigned long align = PADLOCK_ALIGNMENT; | 72 | unsigned long align = PADLOCK_ALIGNMENT; |
| 73 | 73 | ||
| 74 | if (align <= crypto_tfm_ctx_alignment()) | 74 | if (align <= crypto_tfm_ctx_alignment()) |
| 75 | align = 1; | 75 | align = 1; |
| 76 | return (struct aes_ctx *)ALIGN(addr, align); | 76 | return (struct aes_ctx *)ALIGN(addr, align); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) | 79 | static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) |
| 80 | { | 80 | { |
| 81 | return aes_ctx_common(crypto_tfm_ctx(tfm)); | 81 | return aes_ctx_common(crypto_tfm_ctx(tfm)); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) | 84 | static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) |
| 85 | { | 85 | { |
| 86 | return aes_ctx_common(crypto_blkcipher_ctx(tfm)); | 86 | return aes_ctx_common(crypto_blkcipher_ctx(tfm)); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | 89 | static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, |
| 90 | unsigned int key_len) | 90 | unsigned int key_len) |
| 91 | { | 91 | { |
| 92 | struct aes_ctx *ctx = aes_ctx(tfm); | 92 | struct aes_ctx *ctx = aes_ctx(tfm); |
| 93 | const __le32 *key = (const __le32 *)in_key; | 93 | const __le32 *key = (const __le32 *)in_key; |
| 94 | u32 *flags = &tfm->crt_flags; | 94 | u32 *flags = &tfm->crt_flags; |
| 95 | struct crypto_aes_ctx gen_aes; | 95 | struct crypto_aes_ctx gen_aes; |
| 96 | int cpu; | 96 | int cpu; |
| 97 | 97 | ||
| 98 | if (key_len % 8) { | 98 | if (key_len % 8) { |
| 99 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 99 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
| 100 | return -EINVAL; | 100 | return -EINVAL; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | /* | 103 | /* |
| 104 | * If the hardware is capable of generating the extended key | 104 | * If the hardware is capable of generating the extended key |
| 105 | * itself we must supply the plain key for both encryption | 105 | * itself we must supply the plain key for both encryption |
| 106 | * and decryption. | 106 | * and decryption. |
| 107 | */ | 107 | */ |
| 108 | ctx->D = ctx->E; | 108 | ctx->D = ctx->E; |
| 109 | 109 | ||
| 110 | ctx->E[0] = le32_to_cpu(key[0]); | 110 | ctx->E[0] = le32_to_cpu(key[0]); |
| 111 | ctx->E[1] = le32_to_cpu(key[1]); | 111 | ctx->E[1] = le32_to_cpu(key[1]); |
| 112 | ctx->E[2] = le32_to_cpu(key[2]); | 112 | ctx->E[2] = le32_to_cpu(key[2]); |
| 113 | ctx->E[3] = le32_to_cpu(key[3]); | 113 | ctx->E[3] = le32_to_cpu(key[3]); |
| 114 | 114 | ||
| 115 | /* Prepare control words. */ | 115 | /* Prepare control words. */ |
| 116 | memset(&ctx->cword, 0, sizeof(ctx->cword)); | 116 | memset(&ctx->cword, 0, sizeof(ctx->cword)); |
| 117 | 117 | ||
| 118 | ctx->cword.decrypt.encdec = 1; | 118 | ctx->cword.decrypt.encdec = 1; |
| 119 | ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; | 119 | ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; |
| 120 | ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; | 120 | ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; |
| 121 | ctx->cword.encrypt.ksize = (key_len - 16) / 8; | 121 | ctx->cword.encrypt.ksize = (key_len - 16) / 8; |
| 122 | ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; | 122 | ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; |
| 123 | 123 | ||
| 124 | /* Don't generate extended keys if the hardware can do it. */ | 124 | /* Don't generate extended keys if the hardware can do it. */ |
| 125 | if (aes_hw_extkey_available(key_len)) | 125 | if (aes_hw_extkey_available(key_len)) |
| 126 | goto ok; | 126 | goto ok; |
| 127 | 127 | ||
| 128 | ctx->D = ctx->d_data; | 128 | ctx->D = ctx->d_data; |
| 129 | ctx->cword.encrypt.keygen = 1; | 129 | ctx->cword.encrypt.keygen = 1; |
| 130 | ctx->cword.decrypt.keygen = 1; | 130 | ctx->cword.decrypt.keygen = 1; |
| 131 | 131 | ||
| 132 | if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) { | 132 | if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) { |
| 133 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 133 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
| 134 | return -EINVAL; | 134 | return -EINVAL; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); | 137 | memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); |
| 138 | memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); | 138 | memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); |
| 139 | 139 | ||
| 140 | ok: | 140 | ok: |
| 141 | for_each_online_cpu(cpu) | 141 | for_each_online_cpu(cpu) |
| 142 | if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) || | 142 | if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) || |
| 143 | &ctx->cword.decrypt == per_cpu(last_cword, cpu)) | 143 | &ctx->cword.decrypt == per_cpu(last_cword, cpu)) |
| 144 | per_cpu(last_cword, cpu) = NULL; | 144 | per_cpu(last_cword, cpu) = NULL; |
| 145 | 145 | ||
| 146 | return 0; | 146 | return 0; |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | /* ====== Encryption/decryption routines ====== */ | 149 | /* ====== Encryption/decryption routines ====== */ |
| 150 | 150 | ||
| 151 | /* These are the real calls to PadLock. */ | 151 | /* These are the real calls to PadLock. */ |
| 152 | static inline void padlock_reset_key(struct cword *cword) | 152 | static inline void padlock_reset_key(struct cword *cword) |
| 153 | { | 153 | { |
| 154 | int cpu = raw_smp_processor_id(); | 154 | int cpu = raw_smp_processor_id(); |
| 155 | 155 | ||
| 156 | if (cword != per_cpu(last_cword, cpu)) | 156 | if (cword != per_cpu(last_cword, cpu)) |
| 157 | asm volatile ("pushfl; popfl"); | 157 | asm volatile ("pushfl; popfl"); |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | static inline void padlock_store_cword(struct cword *cword) | 160 | static inline void padlock_store_cword(struct cword *cword) |
| 161 | { | 161 | { |
| 162 | per_cpu(last_cword, raw_smp_processor_id()) = cword; | 162 | per_cpu(last_cword, raw_smp_processor_id()) = cword; |
| 163 | } | 163 | } |
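Together, padlock_reset_key() and padlock_store_cword() cache the last control word used on each CPU: the EFLAGS round-trip that forces the engine to reload its key is issued only when a different context last ran on this CPU. The call pattern, mirroring aes_encrypt() below, is:

    padlock_reset_key(&ctx->cword.encrypt);   /* reload only if another cword ran here last */
    /* ... rep xcrypt* work ... */
    padlock_store_cword(&ctx->cword.encrypt); /* remember this cword for next time */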
| 164 | 164 | ||
| 165 | /* | 165 | /* |
| 166 | * While the padlock instructions don't use FP/SSE registers, they | 166 | * While the padlock instructions don't use FP/SSE registers, they |
| 167 | * generate a spurious DNA fault when cr0.ts is '1'. These instructions | 167 | * generate a spurious DNA fault when cr0.ts is '1'. These instructions |
| 168 | * should be used only inside the irq_ts_save/restore() context | 168 | * should be used only inside the irq_ts_save/restore() context |
| 169 | */ | 169 | */ |
| 170 | 170 | ||
| 171 | static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, | 171 | static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, |
| 172 | struct cword *control_word) | 172 | struct cword *control_word) |
| 173 | { | 173 | { |
| 174 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ | 174 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
| 175 | : "+S"(input), "+D"(output) | 175 | : "+S"(input), "+D"(output) |
| 176 | : "d"(control_word), "b"(key), "c"(1)); | 176 | : "d"(control_word), "b"(key), "c"(1)); |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) | 179 | static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) |
| 180 | { | 180 | { |
| 181 | u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1]; | 181 | u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1]; |
| 182 | u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); | 182 | u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); |
| 183 | 183 | ||
| 184 | memcpy(tmp, in, AES_BLOCK_SIZE); | 184 | memcpy(tmp, in, AES_BLOCK_SIZE); |
| 185 | padlock_xcrypt(tmp, out, key, cword); | 185 | padlock_xcrypt(tmp, out, key, cword); |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, | 188 | static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, |
| 189 | struct cword *cword) | 189 | struct cword *cword) |
| 190 | { | 190 | { |
| 191 | /* padlock_xcrypt requires at least two blocks of data. */ | 191 | /* padlock_xcrypt requires at least two blocks of data. */ |
| 192 | if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & | 192 | if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & |
| 193 | (PAGE_SIZE - 1)))) { | 193 | (PAGE_SIZE - 1)))) { |
| 194 | aes_crypt_copy(in, out, key, cword); | 194 | aes_crypt_copy(in, out, key, cword); |
| 195 | return; | 195 | return; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | padlock_xcrypt(in, out, key, cword); | 198 | padlock_xcrypt(in, out, key, cword); |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, | 201 | static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, |
| 202 | void *control_word, u32 count) | 202 | void *control_word, u32 count) |
| 203 | { | 203 | { |
| 204 | if (count == 1) { | 204 | if (count == 1) { |
| 205 | aes_crypt(input, output, key, control_word); | 205 | aes_crypt(input, output, key, control_word); |
| 206 | return; | 206 | return; |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | asm volatile ("test $1, %%cl;" | 209 | asm volatile ("test $1, %%cl;" |
| 210 | "je 1f;" | 210 | "je 1f;" |
| 211 | "lea -1(%%ecx), %%eax;" | 211 | "lea -1(%%ecx), %%eax;" |
| 212 | "mov $1, %%ecx;" | 212 | "mov $1, %%ecx;" |
| 213 | ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ | 213 | ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ |
| 214 | "mov %%eax, %%ecx;" | 214 | "mov %%eax, %%ecx;" |
| 215 | "1:" | 215 | "1:" |
| 216 | ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ | 216 | ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
| 217 | : "+S"(input), "+D"(output) | 217 | : "+S"(input), "+D"(output) |
| 218 | : "d"(control_word), "b"(key), "c"(count) | 218 | : "d"(control_word), "b"(key), "c"(count) |
| 219 | : "ax"); | 219 | : "ax"); |
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, | 222 | static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, |
| 223 | u8 *iv, void *control_word, u32 count) | 223 | u8 *iv, void *control_word, u32 count) |
| 224 | { | 224 | { |
| 225 | /* rep xcryptcbc */ | 225 | /* rep xcryptcbc */ |
| 226 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" | 226 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" |
| 227 | : "+S" (input), "+D" (output), "+a" (iv) | 227 | : "+S" (input), "+D" (output), "+a" (iv) |
| 228 | : "d" (control_word), "b" (key), "c" (count)); | 228 | : "d" (control_word), "b" (key), "c" (count)); |
| 229 | return iv; | 229 | return iv; |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 232 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
| 233 | { | 233 | { |
| 234 | struct aes_ctx *ctx = aes_ctx(tfm); | 234 | struct aes_ctx *ctx = aes_ctx(tfm); |
| 235 | int ts_state; | 235 | int ts_state; |
| 236 | 236 | ||
| 237 | padlock_reset_key(&ctx->cword.encrypt); | 237 | padlock_reset_key(&ctx->cword.encrypt); |
| 238 | ts_state = irq_ts_save(); | 238 | ts_state = irq_ts_save(); |
| 239 | aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); | 239 | aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); |
| 240 | irq_ts_restore(ts_state); | 240 | irq_ts_restore(ts_state); |
| 241 | padlock_store_cword(&ctx->cword.encrypt); | 241 | padlock_store_cword(&ctx->cword.encrypt); |
| 242 | } | 242 | } |
| 243 | 243 | ||
| 244 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 244 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
| 245 | { | 245 | { |
| 246 | struct aes_ctx *ctx = aes_ctx(tfm); | 246 | struct aes_ctx *ctx = aes_ctx(tfm); |
| 247 | int ts_state; | 247 | int ts_state; |
| 248 | 248 | ||
| 249 | padlock_reset_key(&ctx->cword.encrypt); | 249 | padlock_reset_key(&ctx->cword.encrypt); |
| 250 | ts_state = irq_ts_save(); | 250 | ts_state = irq_ts_save(); |
| 251 | aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); | 251 | aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); |
| 252 | irq_ts_restore(ts_state); | 252 | irq_ts_restore(ts_state); |
| 253 | padlock_store_cword(&ctx->cword.encrypt); | 253 | padlock_store_cword(&ctx->cword.encrypt); |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | static struct crypto_alg aes_alg = { | 256 | static struct crypto_alg aes_alg = { |
| 257 | .cra_name = "aes", | 257 | .cra_name = "aes", |
| 258 | .cra_driver_name = "aes-padlock", | 258 | .cra_driver_name = "aes-padlock", |
| 259 | .cra_priority = PADLOCK_CRA_PRIORITY, | 259 | .cra_priority = PADLOCK_CRA_PRIORITY, |
| 260 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 260 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
| 261 | .cra_blocksize = AES_BLOCK_SIZE, | 261 | .cra_blocksize = AES_BLOCK_SIZE, |
| 262 | .cra_ctxsize = sizeof(struct aes_ctx), | 262 | .cra_ctxsize = sizeof(struct aes_ctx), |
| 263 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, | 263 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, |
| 264 | .cra_module = THIS_MODULE, | 264 | .cra_module = THIS_MODULE, |
| 265 | .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), | 265 | .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), |
| 266 | .cra_u = { | 266 | .cra_u = { |
| 267 | .cipher = { | 267 | .cipher = { |
| 268 | .cia_min_keysize = AES_MIN_KEY_SIZE, | 268 | .cia_min_keysize = AES_MIN_KEY_SIZE, |
| 269 | .cia_max_keysize = AES_MAX_KEY_SIZE, | 269 | .cia_max_keysize = AES_MAX_KEY_SIZE, |
| 270 | .cia_setkey = aes_set_key, | 270 | .cia_setkey = aes_set_key, |
| 271 | .cia_encrypt = aes_encrypt, | 271 | .cia_encrypt = aes_encrypt, |
| 272 | .cia_decrypt = aes_decrypt, | 272 | .cia_decrypt = aes_decrypt, |
| 273 | } | 273 | } |
| 274 | } | 274 | } |
| 275 | }; | 275 | }; |
| 276 | 276 | ||
| 277 | static int ecb_aes_encrypt(struct blkcipher_desc *desc, | 277 | static int ecb_aes_encrypt(struct blkcipher_desc *desc, |
| 278 | struct scatterlist *dst, struct scatterlist *src, | 278 | struct scatterlist *dst, struct scatterlist *src, |
| 279 | unsigned int nbytes) | 279 | unsigned int nbytes) |
| 280 | { | 280 | { |
| 281 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 281 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
| 282 | struct blkcipher_walk walk; | 282 | struct blkcipher_walk walk; |
| 283 | int err; | 283 | int err; |
| 284 | int ts_state; | 284 | int ts_state; |
| 285 | 285 | ||
| 286 | padlock_reset_key(&ctx->cword.encrypt); | 286 | padlock_reset_key(&ctx->cword.encrypt); |
| 287 | 287 | ||
| 288 | blkcipher_walk_init(&walk, dst, src, nbytes); | 288 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 289 | err = blkcipher_walk_virt(desc, &walk); | 289 | err = blkcipher_walk_virt(desc, &walk); |
| 290 | 290 | ||
| 291 | ts_state = irq_ts_save(); | 291 | ts_state = irq_ts_save(); |
| 292 | while ((nbytes = walk.nbytes)) { | 292 | while ((nbytes = walk.nbytes)) { |
| 293 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, | 293 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, |
| 294 | ctx->E, &ctx->cword.encrypt, | 294 | ctx->E, &ctx->cword.encrypt, |
| 295 | nbytes / AES_BLOCK_SIZE); | 295 | nbytes / AES_BLOCK_SIZE); |
| 296 | nbytes &= AES_BLOCK_SIZE - 1; | 296 | nbytes &= AES_BLOCK_SIZE - 1; |
| 297 | err = blkcipher_walk_done(desc, &walk, nbytes); | 297 | err = blkcipher_walk_done(desc, &walk, nbytes); |
| 298 | } | 298 | } |
| 299 | irq_ts_restore(ts_state); | 299 | irq_ts_restore(ts_state); |
| 300 | 300 | ||
| 301 | padlock_store_cword(&ctx->cword.encrypt); | 301 | padlock_store_cword(&ctx->cword.encrypt); |
| 302 | 302 | ||
| 303 | return err; | 303 | return err; |
| 304 | } | 304 | } |
| 305 | 305 | ||
| 306 | static int ecb_aes_decrypt(struct blkcipher_desc *desc, | 306 | static int ecb_aes_decrypt(struct blkcipher_desc *desc, |
| 307 | struct scatterlist *dst, struct scatterlist *src, | 307 | struct scatterlist *dst, struct scatterlist *src, |
| 308 | unsigned int nbytes) | 308 | unsigned int nbytes) |
| 309 | { | 309 | { |
| 310 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 310 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
| 311 | struct blkcipher_walk walk; | 311 | struct blkcipher_walk walk; |
| 312 | int err; | 312 | int err; |
| 313 | int ts_state; | 313 | int ts_state; |
| 314 | 314 | ||
| 315 | padlock_reset_key(&ctx->cword.decrypt); | 315 | padlock_reset_key(&ctx->cword.decrypt); |
| 316 | 316 | ||
| 317 | blkcipher_walk_init(&walk, dst, src, nbytes); | 317 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 318 | err = blkcipher_walk_virt(desc, &walk); | 318 | err = blkcipher_walk_virt(desc, &walk); |
| 319 | 319 | ||
| 320 | ts_state = irq_ts_save(); | 320 | ts_state = irq_ts_save(); |
| 321 | while ((nbytes = walk.nbytes)) { | 321 | while ((nbytes = walk.nbytes)) { |
| 322 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, | 322 | padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, |
| 323 | ctx->D, &ctx->cword.decrypt, | 323 | ctx->D, &ctx->cword.decrypt, |
| 324 | nbytes / AES_BLOCK_SIZE); | 324 | nbytes / AES_BLOCK_SIZE); |
| 325 | nbytes &= AES_BLOCK_SIZE - 1; | 325 | nbytes &= AES_BLOCK_SIZE - 1; |
| 326 | err = blkcipher_walk_done(desc, &walk, nbytes); | 326 | err = blkcipher_walk_done(desc, &walk, nbytes); |
| 327 | } | 327 | } |
| 328 | irq_ts_restore(ts_state); | 328 | irq_ts_restore(ts_state); |
| 329 | 329 | ||
| 330 | padlock_store_cword(&ctx->cword.encrypt); | 330 | padlock_store_cword(&ctx->cword.encrypt); |
| 331 | 331 | ||
| 332 | return err; | 332 | return err; |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | static struct crypto_alg ecb_aes_alg = { | 335 | static struct crypto_alg ecb_aes_alg = { |
| 336 | .cra_name = "ecb(aes)", | 336 | .cra_name = "ecb(aes)", |
| 337 | .cra_driver_name = "ecb-aes-padlock", | 337 | .cra_driver_name = "ecb-aes-padlock", |
| 338 | .cra_priority = PADLOCK_COMPOSITE_PRIORITY, | 338 | .cra_priority = PADLOCK_COMPOSITE_PRIORITY, |
| 339 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | 339 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, |
| 340 | .cra_blocksize = AES_BLOCK_SIZE, | 340 | .cra_blocksize = AES_BLOCK_SIZE, |
| 341 | .cra_ctxsize = sizeof(struct aes_ctx), | 341 | .cra_ctxsize = sizeof(struct aes_ctx), |
| 342 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, | 342 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, |
| 343 | .cra_type = &crypto_blkcipher_type, | 343 | .cra_type = &crypto_blkcipher_type, |
| 344 | .cra_module = THIS_MODULE, | 344 | .cra_module = THIS_MODULE, |
| 345 | .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), | 345 | .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), |
| 346 | .cra_u = { | 346 | .cra_u = { |
| 347 | .blkcipher = { | 347 | .blkcipher = { |
| 348 | .min_keysize = AES_MIN_KEY_SIZE, | 348 | .min_keysize = AES_MIN_KEY_SIZE, |
| 349 | .max_keysize = AES_MAX_KEY_SIZE, | 349 | .max_keysize = AES_MAX_KEY_SIZE, |
| 350 | .setkey = aes_set_key, | 350 | .setkey = aes_set_key, |
| 351 | .encrypt = ecb_aes_encrypt, | 351 | .encrypt = ecb_aes_encrypt, |
| 352 | .decrypt = ecb_aes_decrypt, | 352 | .decrypt = ecb_aes_decrypt, |
| 353 | } | 353 | } |
| 354 | } | 354 | } |
| 355 | }; | 355 | }; |
| 356 | 356 | ||
| 357 | static int cbc_aes_encrypt(struct blkcipher_desc *desc, | 357 | static int cbc_aes_encrypt(struct blkcipher_desc *desc, |
| 358 | struct scatterlist *dst, struct scatterlist *src, | 358 | struct scatterlist *dst, struct scatterlist *src, |
| 359 | unsigned int nbytes) | 359 | unsigned int nbytes) |
| 360 | { | 360 | { |
| 361 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 361 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
| 362 | struct blkcipher_walk walk; | 362 | struct blkcipher_walk walk; |
| 363 | int err; | 363 | int err; |
| 364 | int ts_state; | 364 | int ts_state; |
| 365 | 365 | ||
| 366 | padlock_reset_key(&ctx->cword.encrypt); | 366 | padlock_reset_key(&ctx->cword.encrypt); |
| 367 | 367 | ||
| 368 | blkcipher_walk_init(&walk, dst, src, nbytes); | 368 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 369 | err = blkcipher_walk_virt(desc, &walk); | 369 | err = blkcipher_walk_virt(desc, &walk); |
| 370 | 370 | ||
| 371 | ts_state = irq_ts_save(); | 371 | ts_state = irq_ts_save(); |
| 372 | while ((nbytes = walk.nbytes)) { | 372 | while ((nbytes = walk.nbytes)) { |
| 373 | u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, | 373 | u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, |
| 374 | walk.dst.virt.addr, ctx->E, | 374 | walk.dst.virt.addr, ctx->E, |
| 375 | walk.iv, &ctx->cword.encrypt, | 375 | walk.iv, &ctx->cword.encrypt, |
| 376 | nbytes / AES_BLOCK_SIZE); | 376 | nbytes / AES_BLOCK_SIZE); |
| 377 | memcpy(walk.iv, iv, AES_BLOCK_SIZE); | 377 | memcpy(walk.iv, iv, AES_BLOCK_SIZE); |
| 378 | nbytes &= AES_BLOCK_SIZE - 1; | 378 | nbytes &= AES_BLOCK_SIZE - 1; |
| 379 | err = blkcipher_walk_done(desc, &walk, nbytes); | 379 | err = blkcipher_walk_done(desc, &walk, nbytes); |
| 380 | } | 380 | } |
| 381 | irq_ts_restore(ts_state); | 381 | irq_ts_restore(ts_state); |
| 382 | 382 | ||
| 383 | padlock_store_cword(&ctx->cword.decrypt); | 383 | padlock_store_cword(&ctx->cword.decrypt); |
| 384 | 384 | ||
| 385 | return err; | 385 | return err; |
| 386 | } | 386 | } |
| 387 | 387 | ||
| 388 | static int cbc_aes_decrypt(struct blkcipher_desc *desc, | 388 | static int cbc_aes_decrypt(struct blkcipher_desc *desc, |
| 389 | struct scatterlist *dst, struct scatterlist *src, | 389 | struct scatterlist *dst, struct scatterlist *src, |
| 390 | unsigned int nbytes) | 390 | unsigned int nbytes) |
| 391 | { | 391 | { |
| 392 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); | 392 | struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); |
| 393 | struct blkcipher_walk walk; | 393 | struct blkcipher_walk walk; |
| 394 | int err; | 394 | int err; |
| 395 | int ts_state; | 395 | int ts_state; |
| 396 | 396 | ||
| 397 | padlock_reset_key(&ctx->cword.encrypt); | 397 | padlock_reset_key(&ctx->cword.encrypt); |
| 398 | 398 | ||
| 399 | blkcipher_walk_init(&walk, dst, src, nbytes); | 399 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 400 | err = blkcipher_walk_virt(desc, &walk); | 400 | err = blkcipher_walk_virt(desc, &walk); |
| 401 | 401 | ||
| 402 | ts_state = irq_ts_save(); | 402 | ts_state = irq_ts_save(); |
| 403 | while ((nbytes = walk.nbytes)) { | 403 | while ((nbytes = walk.nbytes)) { |
| 404 | padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, | 404 | padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, |
| 405 | ctx->D, walk.iv, &ctx->cword.decrypt, | 405 | ctx->D, walk.iv, &ctx->cword.decrypt, |
| 406 | nbytes / AES_BLOCK_SIZE); | 406 | nbytes / AES_BLOCK_SIZE); |
| 407 | nbytes &= AES_BLOCK_SIZE - 1; | 407 | nbytes &= AES_BLOCK_SIZE - 1; |
| 408 | err = blkcipher_walk_done(desc, &walk, nbytes); | 408 | err = blkcipher_walk_done(desc, &walk, nbytes); |
| 409 | } | 409 | } |
| 410 | 410 | ||
| 411 | irq_ts_restore(ts_state); | 411 | irq_ts_restore(ts_state); |
| 412 | 412 | ||
| 413 | padlock_store_cword(&ctx->cword.encrypt); | 413 | padlock_store_cword(&ctx->cword.encrypt); |
| 414 | 414 | ||
| 415 | return err; | 415 | return err; |
| 416 | } | 416 | } |
| 417 | 417 | ||
| 418 | static struct crypto_alg cbc_aes_alg = { | 418 | static struct crypto_alg cbc_aes_alg = { |
| 419 | .cra_name = "cbc(aes)", | 419 | .cra_name = "cbc(aes)", |
| 420 | .cra_driver_name = "cbc-aes-padlock", | 420 | .cra_driver_name = "cbc-aes-padlock", |
| 421 | .cra_priority = PADLOCK_COMPOSITE_PRIORITY, | 421 | .cra_priority = PADLOCK_COMPOSITE_PRIORITY, |
| 422 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | 422 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, |
| 423 | .cra_blocksize = AES_BLOCK_SIZE, | 423 | .cra_blocksize = AES_BLOCK_SIZE, |
| 424 | .cra_ctxsize = sizeof(struct aes_ctx), | 424 | .cra_ctxsize = sizeof(struct aes_ctx), |
| 425 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, | 425 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, |
| 426 | .cra_type = &crypto_blkcipher_type, | 426 | .cra_type = &crypto_blkcipher_type, |
| 427 | .cra_module = THIS_MODULE, | 427 | .cra_module = THIS_MODULE, |
| 428 | .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), | 428 | .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), |
| 429 | .cra_u = { | 429 | .cra_u = { |
| 430 | .blkcipher = { | 430 | .blkcipher = { |
| 431 | .min_keysize = AES_MIN_KEY_SIZE, | 431 | .min_keysize = AES_MIN_KEY_SIZE, |
| 432 | .max_keysize = AES_MAX_KEY_SIZE, | 432 | .max_keysize = AES_MAX_KEY_SIZE, |
| 433 | .ivsize = AES_BLOCK_SIZE, | 433 | .ivsize = AES_BLOCK_SIZE, |
| 434 | .setkey = aes_set_key, | 434 | .setkey = aes_set_key, |
| 435 | .encrypt = cbc_aes_encrypt, | 435 | .encrypt = cbc_aes_encrypt, |
| 436 | .decrypt = cbc_aes_decrypt, | 436 | .decrypt = cbc_aes_decrypt, |
| 437 | } | 437 | } |
| 438 | } | 438 | } |
| 439 | }; | 439 | }; |
| 440 | 440 | ||
| 441 | static int __init padlock_init(void) | 441 | static int __init padlock_init(void) |
| 442 | { | 442 | { |
| 443 | int ret; | 443 | int ret; |
| 444 | 444 | ||
| 445 | if (!cpu_has_xcrypt) { | 445 | if (!cpu_has_xcrypt) { |
| 446 | printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); | 446 | printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); |
| 447 | return -ENODEV; | 447 | return -ENODEV; |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | if (!cpu_has_xcrypt_enabled) { | 450 | if (!cpu_has_xcrypt_enabled) { |
| 451 | printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); | 451 | printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); |
| 452 | return -ENODEV; | 452 | return -ENODEV; |
| 453 | } | 453 | } |
| 454 | 454 | ||
| 455 | if ((ret = crypto_register_alg(&aes_alg))) | 455 | if ((ret = crypto_register_alg(&aes_alg))) |
| 456 | goto aes_err; | 456 | goto aes_err; |
| 457 | 457 | ||
| 458 | if ((ret = crypto_register_alg(&ecb_aes_alg))) | 458 | if ((ret = crypto_register_alg(&ecb_aes_alg))) |
| 459 | goto ecb_aes_err; | 459 | goto ecb_aes_err; |
| 460 | 460 | ||
| 461 | if ((ret = crypto_register_alg(&cbc_aes_alg))) | 461 | if ((ret = crypto_register_alg(&cbc_aes_alg))) |
| 462 | goto cbc_aes_err; | 462 | goto cbc_aes_err; |
| 463 | 463 | ||
| 464 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); | 464 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); |
| 465 | 465 | ||
| 466 | out: | 466 | out: |
| 467 | return ret; | 467 | return ret; |
| 468 | 468 | ||
| 469 | cbc_aes_err: | 469 | cbc_aes_err: |
| 470 | crypto_unregister_alg(&ecb_aes_alg); | 470 | crypto_unregister_alg(&ecb_aes_alg); |
| 471 | ecb_aes_err: | 471 | ecb_aes_err: |
| 472 | crypto_unregister_alg(&aes_alg); | 472 | crypto_unregister_alg(&aes_alg); |
| 473 | aes_err: | 473 | aes_err: |
| 474 | printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); | 474 | printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); |
| 475 | goto out; | 475 | goto out; |
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | static void __exit padlock_fini(void) | 478 | static void __exit padlock_fini(void) |
| 479 | { | 479 | { |
| 480 | crypto_unregister_alg(&cbc_aes_alg); | 480 | crypto_unregister_alg(&cbc_aes_alg); |
| 481 | crypto_unregister_alg(&ecb_aes_alg); | 481 | crypto_unregister_alg(&ecb_aes_alg); |
| 482 | crypto_unregister_alg(&aes_alg); | 482 | crypto_unregister_alg(&aes_alg); |
| 483 | } | 483 | } |
| 484 | 484 | ||
| 485 | module_init(padlock_init); | 485 | module_init(padlock_init); |
| 486 | module_exit(padlock_fini); | 486 | module_exit(padlock_fini); |
| 487 | 487 | ||
| 488 | MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); | 488 | MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); |
| 489 | MODULE_LICENSE("GPL"); | 489 | MODULE_LICENSE("GPL"); |
| 490 | MODULE_AUTHOR("Michal Ludvig"); | 490 | MODULE_AUTHOR("Michal Ludvig"); |
| 491 | 491 | ||
| 492 | MODULE_ALIAS("aes"); | 492 | MODULE_ALIAS("aes-all"); |
| 493 | 493 |
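The one functional change in this file is the module alias at the end, which moves from "aes" to "aes-all". As a direct consequence, a request_module("aes") issued on behalf of a software fallback no longer resolves to this driver; only the distinct "-all" alias does. Roughly (the generic line is shown for contrast and is not part of this diff):

    /* padlock-aes.c (this diff): an implementation that itself needs a fallback */
    MODULE_ALIAS("aes-all");

    /* aes-generic (not in this diff): keeps the plain alias */
    MODULE_ALIAS("aes");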
drivers/crypto/padlock-sha.c
| 1 | /* | 1 | /* |
| 2 | * Cryptographic API. | 2 | * Cryptographic API. |
| 3 | * | 3 | * |
| 4 | * Support for VIA PadLock hardware crypto engine. | 4 | * Support for VIA PadLock hardware crypto engine. |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> | 6 | * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
| 10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * the Free Software Foundation; either version 2 of the License, or |
| 11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
| 12 | * | 12 | * |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <crypto/algapi.h> | 15 | #include <crypto/algapi.h> |
| 16 | #include <crypto/sha.h> | 16 | #include <crypto/sha.h> |
| 17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| 20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
| 21 | #include <linux/cryptohash.h> | 21 | #include <linux/cryptohash.h> |
| 22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
| 23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
| 24 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
| 25 | #include <asm/i387.h> | 25 | #include <asm/i387.h> |
| 26 | #include "padlock.h" | 26 | #include "padlock.h" |
| 27 | 27 | ||
| 28 | #define SHA1_DEFAULT_FALLBACK "sha1-generic" | 28 | #define SHA1_DEFAULT_FALLBACK "sha1-generic" |
| 29 | #define SHA256_DEFAULT_FALLBACK "sha256-generic" | 29 | #define SHA256_DEFAULT_FALLBACK "sha256-generic" |
| 30 | 30 | ||
| 31 | struct padlock_sha_ctx { | 31 | struct padlock_sha_ctx { |
| 32 | char *data; | 32 | char *data; |
| 33 | size_t used; | 33 | size_t used; |
| 34 | int bypass; | 34 | int bypass; |
| 35 | void (*f_sha_padlock)(const char *in, char *out, int count); | 35 | void (*f_sha_padlock)(const char *in, char *out, int count); |
| 36 | struct hash_desc fallback; | 36 | struct hash_desc fallback; |
| 37 | }; | 37 | }; |
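The fallback member above is a full software hash transform. A hedged sketch of how such a fallback is typically obtained is shown below; in this driver the setup belongs to padlock_cra_init(), whose body falls below the quoted portion, and fallback_driver_name as well as the exact flags are assumptions, not quotes from this file:

    /* Hedged sketch, not quoted from this file: ask for a software
     * implementation; masking CRYPTO_ALG_NEED_FALLBACK avoids picking
     * another provider that itself requires a fallback. */
    ctx(tfm)->fallback.tfm = crypto_alloc_hash(fallback_driver_name, 0,
                                               CRYPTO_ALG_ASYNC |
                                               CRYPTO_ALG_NEED_FALLBACK);
    if (IS_ERR(ctx(tfm)->fallback.tfm))
            return PTR_ERR(ctx(tfm)->fallback.tfm);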
| 38 | 38 | ||
| 39 | static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) | 39 | static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) |
| 40 | { | 40 | { |
| 41 | return crypto_tfm_ctx(tfm); | 41 | return crypto_tfm_ctx(tfm); |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | /* We'll need aligned address on the stack */ | 44 | /* We'll need aligned address on the stack */ |
| 45 | #define NEAREST_ALIGNED(ptr) \ | 45 | #define NEAREST_ALIGNED(ptr) \ |
| 46 | ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT)) | 46 | ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT)) |
| 47 | 47 | ||
| 48 | static struct crypto_alg sha1_alg, sha256_alg; | 48 | static struct crypto_alg sha1_alg, sha256_alg; |
| 49 | 49 | ||
| 50 | static void padlock_sha_bypass(struct crypto_tfm *tfm) | 50 | static void padlock_sha_bypass(struct crypto_tfm *tfm) |
| 51 | { | 51 | { |
| 52 | if (ctx(tfm)->bypass) | 52 | if (ctx(tfm)->bypass) |
| 53 | return; | 53 | return; |
| 54 | 54 | ||
| 55 | crypto_hash_init(&ctx(tfm)->fallback); | 55 | crypto_hash_init(&ctx(tfm)->fallback); |
| 56 | if (ctx(tfm)->data && ctx(tfm)->used) { | 56 | if (ctx(tfm)->data && ctx(tfm)->used) { |
| 57 | struct scatterlist sg; | 57 | struct scatterlist sg; |
| 58 | 58 | ||
| 59 | sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used); | 59 | sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used); |
| 60 | crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); | 60 | crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | ctx(tfm)->used = 0; | 63 | ctx(tfm)->used = 0; |
| 64 | ctx(tfm)->bypass = 1; | 64 | ctx(tfm)->bypass = 1; |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | static void padlock_sha_init(struct crypto_tfm *tfm) | 67 | static void padlock_sha_init(struct crypto_tfm *tfm) |
| 68 | { | 68 | { |
| 69 | ctx(tfm)->used = 0; | 69 | ctx(tfm)->used = 0; |
| 70 | ctx(tfm)->bypass = 0; | 70 | ctx(tfm)->bypass = 0; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static void padlock_sha_update(struct crypto_tfm *tfm, | 73 | static void padlock_sha_update(struct crypto_tfm *tfm, |
| 74 | const uint8_t *data, unsigned int length) | 74 | const uint8_t *data, unsigned int length) |
| 75 | { | 75 | { |
| 76 | /* Our buffer is always one page. */ | 76 | /* Our buffer is always one page. */ |
| 77 | if (unlikely(!ctx(tfm)->bypass && | 77 | if (unlikely(!ctx(tfm)->bypass && |
| 78 | (ctx(tfm)->used + length > PAGE_SIZE))) | 78 | (ctx(tfm)->used + length > PAGE_SIZE))) |
| 79 | padlock_sha_bypass(tfm); | 79 | padlock_sha_bypass(tfm); |
| 80 | 80 | ||
| 81 | if (unlikely(ctx(tfm)->bypass)) { | 81 | if (unlikely(ctx(tfm)->bypass)) { |
| 82 | struct scatterlist sg; | 82 | struct scatterlist sg; |
| 83 | sg_init_one(&sg, (uint8_t *)data, length); | 83 | sg_init_one(&sg, (uint8_t *)data, length); |
| 84 | crypto_hash_update(&ctx(tfm)->fallback, &sg, length); | 84 | crypto_hash_update(&ctx(tfm)->fallback, &sg, length); |
| 85 | return; | 85 | return; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); | 88 | memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); |
| 89 | ctx(tfm)->used += length; | 89 | ctx(tfm)->used += length; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static inline void padlock_output_block(uint32_t *src, | 92 | static inline void padlock_output_block(uint32_t *src, |
| 93 | uint32_t *dst, size_t count) | 93 | uint32_t *dst, size_t count) |
| 94 | { | 94 | { |
| 95 | while (count--) | 95 | while (count--) |
| 96 | *dst++ = swab32(*src++); | 96 | *dst++ = swab32(*src++); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | static void padlock_do_sha1(const char *in, char *out, int count) | 99 | static void padlock_do_sha1(const char *in, char *out, int count) |
| 100 | { | 100 | { |
| 101 | /* We can't store directly to *out as it may be unaligned. */ | 101 | /* We can't store directly to *out as it may be unaligned. */ |
| 102 | /* BTW Don't reduce the buffer size below 128 Bytes! | 102 | /* BTW Don't reduce the buffer size below 128 Bytes! |
| 103 | * PadLock microcode needs it that big. */ | 103 | * PadLock microcode needs it that big. */ |
| 104 | char buf[128+16]; | 104 | char buf[128+16]; |
| 105 | char *result = NEAREST_ALIGNED(buf); | 105 | char *result = NEAREST_ALIGNED(buf); |
| 106 | int ts_state; | 106 | int ts_state; |
| 107 | 107 | ||
| 108 | ((uint32_t *)result)[0] = SHA1_H0; | 108 | ((uint32_t *)result)[0] = SHA1_H0; |
| 109 | ((uint32_t *)result)[1] = SHA1_H1; | 109 | ((uint32_t *)result)[1] = SHA1_H1; |
| 110 | ((uint32_t *)result)[2] = SHA1_H2; | 110 | ((uint32_t *)result)[2] = SHA1_H2; |
| 111 | ((uint32_t *)result)[3] = SHA1_H3; | 111 | ((uint32_t *)result)[3] = SHA1_H3; |
| 112 | ((uint32_t *)result)[4] = SHA1_H4; | 112 | ((uint32_t *)result)[4] = SHA1_H4; |
| 113 | 113 | ||
| 114 | /* prevent taking the spurious DNA fault with padlock. */ | 114 | /* prevent taking the spurious DNA fault with padlock. */ |
| 115 | ts_state = irq_ts_save(); | 115 | ts_state = irq_ts_save(); |
| 116 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ | 116 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ |
| 117 | : "+S"(in), "+D"(result) | 117 | : "+S"(in), "+D"(result) |
| 118 | : "c"(count), "a"(0)); | 118 | : "c"(count), "a"(0)); |
| 119 | irq_ts_restore(ts_state); | 119 | irq_ts_restore(ts_state); |
| 120 | 120 | ||
| 121 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); | 121 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | static void padlock_do_sha256(const char *in, char *out, int count) | 124 | static void padlock_do_sha256(const char *in, char *out, int count) |
| 125 | { | 125 | { |
| 126 | /* We can't store directly to *out as it may be unaligned. */ | 126 | /* We can't store directly to *out as it may be unaligned. */ |
| 127 | /* BTW Don't reduce the buffer size below 128 Bytes! | 127 | /* BTW Don't reduce the buffer size below 128 Bytes! |
| 128 | * PadLock microcode needs it that big. */ | 128 | * PadLock microcode needs it that big. */ |
| 129 | char buf[128+16]; | 129 | char buf[128+16]; |
| 130 | char *result = NEAREST_ALIGNED(buf); | 130 | char *result = NEAREST_ALIGNED(buf); |
| 131 | int ts_state; | 131 | int ts_state; |
| 132 | 132 | ||
| 133 | ((uint32_t *)result)[0] = SHA256_H0; | 133 | ((uint32_t *)result)[0] = SHA256_H0; |
| 134 | ((uint32_t *)result)[1] = SHA256_H1; | 134 | ((uint32_t *)result)[1] = SHA256_H1; |
| 135 | ((uint32_t *)result)[2] = SHA256_H2; | 135 | ((uint32_t *)result)[2] = SHA256_H2; |
| 136 | ((uint32_t *)result)[3] = SHA256_H3; | 136 | ((uint32_t *)result)[3] = SHA256_H3; |
| 137 | ((uint32_t *)result)[4] = SHA256_H4; | 137 | ((uint32_t *)result)[4] = SHA256_H4; |
| 138 | ((uint32_t *)result)[5] = SHA256_H5; | 138 | ((uint32_t *)result)[5] = SHA256_H5; |
| 139 | ((uint32_t *)result)[6] = SHA256_H6; | 139 | ((uint32_t *)result)[6] = SHA256_H6; |
| 140 | ((uint32_t *)result)[7] = SHA256_H7; | 140 | ((uint32_t *)result)[7] = SHA256_H7; |
| 141 | 141 | ||
| 142 | /* prevent taking the spurious DNA fault with padlock. */ | 142 | /* prevent taking the spurious DNA fault with padlock. */ |
| 143 | ts_state = irq_ts_save(); | 143 | ts_state = irq_ts_save(); |
| 144 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ | 144 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ |
| 145 | : "+S"(in), "+D"(result) | 145 | : "+S"(in), "+D"(result) |
| 146 | : "c"(count), "a"(0)); | 146 | : "c"(count), "a"(0)); |
| 147 | irq_ts_restore(ts_state); | 147 | irq_ts_restore(ts_state); |
| 148 | 148 | ||
| 149 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); | 149 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) | 152 | static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) |
| 153 | { | 153 | { |
| 154 | if (unlikely(ctx(tfm)->bypass)) { | 154 | if (unlikely(ctx(tfm)->bypass)) { |
| 155 | crypto_hash_final(&ctx(tfm)->fallback, out); | 155 | crypto_hash_final(&ctx(tfm)->fallback, out); |
| 156 | ctx(tfm)->bypass = 0; | 156 | ctx(tfm)->bypass = 0; |
| 157 | return; | 157 | return; |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | /* Pass the input buffer to PadLock microcode... */ | 160 | /* Pass the input buffer to PadLock microcode... */ |
| 161 | ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used); | 161 | ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used); |
| 162 | 162 | ||
| 163 | ctx(tfm)->used = 0; | 163 | ctx(tfm)->used = 0; |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | static int padlock_cra_init(struct crypto_tfm *tfm) | 166 | static int padlock_cra_init(struct crypto_tfm *tfm) |
| 167 | { | 167 | { |
| 168 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | 168 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; |
| 169 | struct crypto_hash *fallback_tfm; | 169 | struct crypto_hash *fallback_tfm; |
| 170 | 170 | ||
| 171 | /* For now we'll allocate one page. This | 171 | /* For now we'll allocate one page. This |
| 172 | * could eventually be configurable one day. */ | 172 | * could eventually be configurable one day. */ |
| 173 | ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); | 173 | ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); |
| 174 | if (!ctx(tfm)->data) | 174 | if (!ctx(tfm)->data) |
| 175 | return -ENOMEM; | 175 | return -ENOMEM; |
| 176 | 176 | ||
| 177 | /* Allocate a fallback and abort if it failed. */ | 177 | /* Allocate a fallback and abort if it failed. */ |
| 178 | fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, | 178 | fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, |
| 179 | CRYPTO_ALG_ASYNC | | 179 | CRYPTO_ALG_ASYNC | |
| 180 | CRYPTO_ALG_NEED_FALLBACK); | 180 | CRYPTO_ALG_NEED_FALLBACK); |
| 181 | if (IS_ERR(fallback_tfm)) { | 181 | if (IS_ERR(fallback_tfm)) { |
| 182 | printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", | 182 | printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", |
| 183 | fallback_driver_name); | 183 | fallback_driver_name); |
| 184 | free_page((unsigned long)(ctx(tfm)->data)); | 184 | free_page((unsigned long)(ctx(tfm)->data)); |
| 185 | return PTR_ERR(fallback_tfm); | 185 | return PTR_ERR(fallback_tfm); |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | ctx(tfm)->fallback.tfm = fallback_tfm; | 188 | ctx(tfm)->fallback.tfm = fallback_tfm; |
| 189 | return 0; | 189 | return 0; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static int padlock_sha1_cra_init(struct crypto_tfm *tfm) | 192 | static int padlock_sha1_cra_init(struct crypto_tfm *tfm) |
| 193 | { | 193 | { |
| 194 | ctx(tfm)->f_sha_padlock = padlock_do_sha1; | 194 | ctx(tfm)->f_sha_padlock = padlock_do_sha1; |
| 195 | 195 | ||
| 196 | return padlock_cra_init(tfm); | 196 | return padlock_cra_init(tfm); |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | static int padlock_sha256_cra_init(struct crypto_tfm *tfm) | 199 | static int padlock_sha256_cra_init(struct crypto_tfm *tfm) |
| 200 | { | 200 | { |
| 201 | ctx(tfm)->f_sha_padlock = padlock_do_sha256; | 201 | ctx(tfm)->f_sha_padlock = padlock_do_sha256; |
| 202 | 202 | ||
| 203 | return padlock_cra_init(tfm); | 203 | return padlock_cra_init(tfm); |
| 204 | } | 204 | } |
| 205 | 205 | ||
| 206 | static void padlock_cra_exit(struct crypto_tfm *tfm) | 206 | static void padlock_cra_exit(struct crypto_tfm *tfm) |
| 207 | { | 207 | { |
| 208 | if (ctx(tfm)->data) { | 208 | if (ctx(tfm)->data) { |
| 209 | free_page((unsigned long)(ctx(tfm)->data)); | 209 | free_page((unsigned long)(ctx(tfm)->data)); |
| 210 | ctx(tfm)->data = NULL; | 210 | ctx(tfm)->data = NULL; |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | crypto_free_hash(ctx(tfm)->fallback.tfm); | 213 | crypto_free_hash(ctx(tfm)->fallback.tfm); |
| 214 | ctx(tfm)->fallback.tfm = NULL; | 214 | ctx(tfm)->fallback.tfm = NULL; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static struct crypto_alg sha1_alg = { | 217 | static struct crypto_alg sha1_alg = { |
| 218 | .cra_name = "sha1", | 218 | .cra_name = "sha1", |
| 219 | .cra_driver_name = "sha1-padlock", | 219 | .cra_driver_name = "sha1-padlock", |
| 220 | .cra_priority = PADLOCK_CRA_PRIORITY, | 220 | .cra_priority = PADLOCK_CRA_PRIORITY, |
| 221 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | | 221 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | |
| 222 | CRYPTO_ALG_NEED_FALLBACK, | 222 | CRYPTO_ALG_NEED_FALLBACK, |
| 223 | .cra_blocksize = SHA1_BLOCK_SIZE, | 223 | .cra_blocksize = SHA1_BLOCK_SIZE, |
| 224 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), | 224 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), |
| 225 | .cra_module = THIS_MODULE, | 225 | .cra_module = THIS_MODULE, |
| 226 | .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), | 226 | .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), |
| 227 | .cra_init = padlock_sha1_cra_init, | 227 | .cra_init = padlock_sha1_cra_init, |
| 228 | .cra_exit = padlock_cra_exit, | 228 | .cra_exit = padlock_cra_exit, |
| 229 | .cra_u = { | 229 | .cra_u = { |
| 230 | .digest = { | 230 | .digest = { |
| 231 | .dia_digestsize = SHA1_DIGEST_SIZE, | 231 | .dia_digestsize = SHA1_DIGEST_SIZE, |
| 232 | .dia_init = padlock_sha_init, | 232 | .dia_init = padlock_sha_init, |
| 233 | .dia_update = padlock_sha_update, | 233 | .dia_update = padlock_sha_update, |
| 234 | .dia_final = padlock_sha_final, | 234 | .dia_final = padlock_sha_final, |
| 235 | } | 235 | } |
| 236 | } | 236 | } |
| 237 | }; | 237 | }; |
| 238 | 238 | ||
| 239 | static struct crypto_alg sha256_alg = { | 239 | static struct crypto_alg sha256_alg = { |
| 240 | .cra_name = "sha256", | 240 | .cra_name = "sha256", |
| 241 | .cra_driver_name = "sha256-padlock", | 241 | .cra_driver_name = "sha256-padlock", |
| 242 | .cra_priority = PADLOCK_CRA_PRIORITY, | 242 | .cra_priority = PADLOCK_CRA_PRIORITY, |
| 243 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | | 243 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | |
| 244 | CRYPTO_ALG_NEED_FALLBACK, | 244 | CRYPTO_ALG_NEED_FALLBACK, |
| 245 | .cra_blocksize = SHA256_BLOCK_SIZE, | 245 | .cra_blocksize = SHA256_BLOCK_SIZE, |
| 246 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), | 246 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), |
| 247 | .cra_module = THIS_MODULE, | 247 | .cra_module = THIS_MODULE, |
| 248 | .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), | 248 | .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), |
| 249 | .cra_init = padlock_sha256_cra_init, | 249 | .cra_init = padlock_sha256_cra_init, |
| 250 | .cra_exit = padlock_cra_exit, | 250 | .cra_exit = padlock_cra_exit, |
| 251 | .cra_u = { | 251 | .cra_u = { |
| 252 | .digest = { | 252 | .digest = { |
| 253 | .dia_digestsize = SHA256_DIGEST_SIZE, | 253 | .dia_digestsize = SHA256_DIGEST_SIZE, |
| 254 | .dia_init = padlock_sha_init, | 254 | .dia_init = padlock_sha_init, |
| 255 | .dia_update = padlock_sha_update, | 255 | .dia_update = padlock_sha_update, |
| 256 | .dia_final = padlock_sha_final, | 256 | .dia_final = padlock_sha_final, |
| 257 | } | 257 | } |
| 258 | } | 258 | } |
| 259 | }; | 259 | }; |
| 260 | 260 | ||
| 261 | static int __init padlock_init(void) | 261 | static int __init padlock_init(void) |
| 262 | { | 262 | { |
| 263 | int rc = -ENODEV; | 263 | int rc = -ENODEV; |
| 264 | 264 | ||
| 265 | if (!cpu_has_phe) { | 265 | if (!cpu_has_phe) { |
| 266 | printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n"); | 266 | printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n"); |
| 267 | return -ENODEV; | 267 | return -ENODEV; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | if (!cpu_has_phe_enabled) { | 270 | if (!cpu_has_phe_enabled) { |
| 271 | printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); | 271 | printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); |
| 272 | return -ENODEV; | 272 | return -ENODEV; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | rc = crypto_register_alg(&sha1_alg); | 275 | rc = crypto_register_alg(&sha1_alg); |
| 276 | if (rc) | 276 | if (rc) |
| 277 | goto out; | 277 | goto out; |
| 278 | 278 | ||
| 279 | rc = crypto_register_alg(&sha256_alg); | 279 | rc = crypto_register_alg(&sha256_alg); |
| 280 | if (rc) | 280 | if (rc) |
| 281 | goto out_unreg1; | 281 | goto out_unreg1; |
| 282 | 282 | ||
| 283 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n"); | 283 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n"); |
| 284 | 284 | ||
| 285 | return 0; | 285 | return 0; |
| 286 | 286 | ||
| 287 | out_unreg1: | 287 | out_unreg1: |
| 288 | crypto_unregister_alg(&sha1_alg); | 288 | crypto_unregister_alg(&sha1_alg); |
| 289 | out: | 289 | out: |
| 290 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); | 290 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); |
| 291 | return rc; | 291 | return rc; |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | static void __exit padlock_fini(void) | 294 | static void __exit padlock_fini(void) |
| 295 | { | 295 | { |
| 296 | crypto_unregister_alg(&sha1_alg); | 296 | crypto_unregister_alg(&sha1_alg); |
| 297 | crypto_unregister_alg(&sha256_alg); | 297 | crypto_unregister_alg(&sha256_alg); |
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | module_init(padlock_init); | 300 | module_init(padlock_init); |
| 301 | module_exit(padlock_fini); | 301 | module_exit(padlock_fini); |
| 302 | 302 | ||
| 303 | MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); | 303 | MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); |
| 304 | MODULE_LICENSE("GPL"); | 304 | MODULE_LICENSE("GPL"); |
| 305 | MODULE_AUTHOR("Michal Ludvig"); | 305 | MODULE_AUTHOR("Michal Ludvig"); |
| 306 | 306 | ||
| 307 | MODULE_ALIAS("sha1"); | 307 | MODULE_ALIAS("sha1-all"); |
| 308 | MODULE_ALIAS("sha256"); | 308 | MODULE_ALIAS("sha256-all"); |
| 309 | MODULE_ALIAS("sha1-padlock"); | 309 | MODULE_ALIAS("sha1-padlock"); |
| 310 | MODULE_ALIAS("sha256-padlock"); | 310 | MODULE_ALIAS("sha256-padlock"); |
| 311 | 311 | ||
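
For reference, a minimal sketch (not part of the commit) of the pattern the driver above follows after this change: the hardware implementation keeps the plain algorithm name as its cra_name but now advertises only the suffixed "-all" module alias, and when it allocates its software fallback it passes CRYPTO_ALG_NEED_FALLBACK (and CRYPTO_ALG_ASYNC) in the mask so the lookup skips other fallback-needing implementations; the module loader then resolves the plain "sha1" alias to the generic software provider instead of re-requesting this driver's module. The module name example_fallback_demo and the function names below are hypothetical; only the crypto_alloc_hash()/crypto_free_hash() calls, the flags, and the "sha1-all" alias mirror the code shown above.

/*
 * example_fallback_demo.c -- hypothetical, illustrative skeleton only.
 * It mirrors the fallback-allocation pattern visible in
 * padlock_cra_init() above and the new "-all" module alias; it is not
 * part of this commit.
 */
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_hash *example_fallback;

static int __init example_fallback_demo_init(void)
{
	/*
	 * Ask for a synchronous "sha1" that does not itself need a
	 * fallback.  With the distinct aliases, this request loads the
	 * generic software module (alias "sha1") rather than looping
	 * back into a hardware driver that is now aliased "sha1-all".
	 */
	example_fallback = crypto_alloc_hash("sha1", 0,
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(example_fallback))
		return PTR_ERR(example_fallback);

	return 0;
}

static void __exit example_fallback_demo_exit(void)
{
	crypto_free_hash(example_fallback);
}

module_init(example_fallback_demo_init);
module_exit(example_fallback_demo_exit);

MODULE_LICENSE("GPL");
/* A fallback-needing "sha1" provider would now advertise this alias: */
MODULE_ALIAS("sha1-all");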