Commit 7d024608265eb815ae4ce1e5da097ec9d800dda4

Authored by Herbert Xu
1 parent f63559bef3

crypto: padlock - Use shash fallback for sha

This patch changes the padlock-sha driver's fallback to use the shash interface instead of the hash interface.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Showing 1 changed file with 52 additions and 30 deletions Side-by-side Diff

drivers/crypto/padlock-sha.c
... ... @@ -12,28 +12,24 @@
12 12 *
13 13 */
14 14  
15   -#include <crypto/algapi.h>
  15 +#include <crypto/internal/hash.h>
16 16 #include <crypto/sha.h>
17 17 #include <linux/err.h>
18 18 #include <linux/module.h>
19 19 #include <linux/init.h>
20 20 #include <linux/errno.h>
21   -#include <linux/cryptohash.h>
22 21 #include <linux/interrupt.h>
23 22 #include <linux/kernel.h>
24 23 #include <linux/scatterlist.h>
25 24 #include <asm/i387.h>
26 25 #include "padlock.h"
27 26  
28   -#define SHA1_DEFAULT_FALLBACK "sha1-generic"
29   -#define SHA256_DEFAULT_FALLBACK "sha256-generic"
30   -
31 27 struct padlock_sha_ctx {
32 28 char *data;
33 29 size_t used;
34 30 int bypass;
35 31 void (*f_sha_padlock)(const char *in, char *out, int count);
36   - struct hash_desc fallback;
  32 + struct shash_desc *fallback;
37 33 };
38 34  
39 35 static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
40 36  
41 37  
42 38  
43 39  
44 40  
... ... @@ -47,21 +43,26 @@
47 43  
48 44 static struct crypto_alg sha1_alg, sha256_alg;
49 45  
50   -static void padlock_sha_bypass(struct crypto_tfm *tfm)
  46 +static int padlock_sha_bypass(struct crypto_tfm *tfm)
51 47 {
  48 + int err = 0;
  49 +
52 50 if (ctx(tfm)->bypass)
53   - return;
  51 + goto out;
54 52  
55   - crypto_hash_init(&ctx(tfm)->fallback);
56   - if (ctx(tfm)->data && ctx(tfm)->used) {
57   - struct scatterlist sg;
  53 + err = crypto_shash_init(ctx(tfm)->fallback);
  54 + if (err)
  55 + goto out;
58 56  
59   - sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
60   - crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
61   - }
  57 + if (ctx(tfm)->data && ctx(tfm)->used)
  58 + err = crypto_shash_update(ctx(tfm)->fallback, ctx(tfm)->data,
  59 + ctx(tfm)->used);
62 60  
63 61 ctx(tfm)->used = 0;
64 62 ctx(tfm)->bypass = 1;
  63 +
  64 +out:
  65 + return err;
65 66 }
66 67  
67 68 static void padlock_sha_init(struct crypto_tfm *tfm)
68 69  
69 70  
... ... @@ -73,15 +74,18 @@
73 74 static void padlock_sha_update(struct crypto_tfm *tfm,
74 75 const uint8_t *data, unsigned int length)
75 76 {
  77 + int err;
  78 +
76 79 /* Our buffer is always one page. */
77 80 if (unlikely(!ctx(tfm)->bypass &&
78   - (ctx(tfm)->used + length > PAGE_SIZE)))
79   - padlock_sha_bypass(tfm);
  81 + (ctx(tfm)->used + length > PAGE_SIZE))) {
  82 + err = padlock_sha_bypass(tfm);
  83 + BUG_ON(err);
  84 + }
80 85  
81 86 if (unlikely(ctx(tfm)->bypass)) {
82   - struct scatterlist sg;
83   - sg_init_one(&sg, (uint8_t *)data, length);
84   - crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
  87 + err = crypto_shash_update(ctx(tfm)->fallback, data, length);
  88 + BUG_ON(err);
85 89 return;
86 90 }
87 91  
88 92  
... ... @@ -151,8 +155,11 @@
151 155  
152 156 static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
153 157 {
  158 + int err;
  159 +
154 160 if (unlikely(ctx(tfm)->bypass)) {
155   - crypto_hash_final(&ctx(tfm)->fallback, out);
  161 + err = crypto_shash_final(ctx(tfm)->fallback, out);
  162 + BUG_ON(err);
156 163 ctx(tfm)->bypass = 0;
157 164 return;
158 165 }
159 166  
160 167  
161 168  
162 169  
163 170  
... ... @@ -166,27 +173,41 @@
166 173 static int padlock_cra_init(struct crypto_tfm *tfm)
167 174 {
168 175 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
169   - struct crypto_hash *fallback_tfm;
  176 + struct crypto_shash *fallback_tfm;
  177 + int err = -ENOMEM;
170 178  
171 179 /* For now we'll allocate one page. This
172 180 * could eventually be configurable one day. */
173 181 ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
174 182 if (!ctx(tfm)->data)
175   - return -ENOMEM;
  183 + goto out;
176 184  
177 185 /* Allocate a fallback and abort if it failed. */
178   - fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
179   - CRYPTO_ALG_ASYNC |
180   - CRYPTO_ALG_NEED_FALLBACK);
  186 + fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
  187 + CRYPTO_ALG_NEED_FALLBACK);
181 188 if (IS_ERR(fallback_tfm)) {
182 189 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
183 190 fallback_driver_name);
184   - free_page((unsigned long)(ctx(tfm)->data));
185   - return PTR_ERR(fallback_tfm);
  191 + err = PTR_ERR(fallback_tfm);
  192 + goto out_free_page;
186 193 }
187 194  
188   - ctx(tfm)->fallback.tfm = fallback_tfm;
  195 + ctx(tfm)->fallback = kmalloc(sizeof(struct shash_desc) +
  196 + crypto_shash_descsize(fallback_tfm),
  197 + GFP_KERNEL);
  198 + if (!ctx(tfm)->fallback)
  199 + goto out_free_tfm;
  200 +
  201 + ctx(tfm)->fallback->tfm = fallback_tfm;
  202 + ctx(tfm)->fallback->flags = 0;
189 203 return 0;
  204 +
  205 +out_free_tfm:
  206 + crypto_free_shash(fallback_tfm);
  207 +out_free_page:
  208 + free_page((unsigned long)(ctx(tfm)->data));
  209 +out:
  210 + return err;
190 211 }
191 212  
192 213 static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
... ... @@ -210,8 +231,9 @@
210 231 ctx(tfm)->data = NULL;
211 232 }
212 233  
213   - crypto_free_hash(ctx(tfm)->fallback.tfm);
214   - ctx(tfm)->fallback.tfm = NULL;
  234 + crypto_free_shash(ctx(tfm)->fallback->tfm);
  235 +
  236 + kzfree(ctx(tfm)->fallback);
215 237 }
216 238  
217 239 static struct crypto_alg sha1_alg = {