Commit bf362759034cf208966dff262c7d740a6b1b3edd

Authored by Dmitry Kasatkin
Committed by Herbert Xu
1 parent 528d26f57a

crypto: omap-sham - hmac calculation bug fix for sha1 base hash

This patch fixes two inter-dependent HMAC bugs.

1. "omap-sham: hash-in-progress is stored in hw format" commit introduced
optimization where temporary hash had been stored in OMAP specific format
(big endian).
For SHA1 it is different to real hash format, which is little endian.
Final HMAC value was calculated using incorrect hash.
Because CONFIG_CRYPTO_MANAGER_TESTS was disabled this error remained
unnoticed. After enabling this option, bug has been found.
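
To make the word-order issue concrete, here is a minimal userspace sketch of
what the driver's omap_sham_copy_ready_hash() does for SHA1. It is a
hypothetical stand-in, not driver code: be32_to_cpu_sketch and the sample
words are illustrative, and it assumes a little-endian host such as the
OMAP's ARM core.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's be32_to_cpu() on a
 * little-endian CPU. */
static uint32_t be32_to_cpu_sketch(uint32_t v)
{
	return __builtin_bswap32(v);
}

int main(void)
{
	/* Made-up words as read back from SHA_REG_DIGEST(i); the SHA1
	 * state is kept in the accelerator's big-endian word format. */
	uint32_t hw[5] = { 0x67452301, 0xefcdab89, 0x98badcfe,
			   0x10325476, 0xc3d2e1f0 };
	uint8_t out[20];
	int i;

	/* Per-word byte swap before export, as in
	 * omap_sham_copy_ready_hash(). */
	for (i = 0; i < 5; i++) {
		uint32_t w = be32_to_cpu_sketch(hw[i]);
		memcpy(out + 4 * i, &w, sizeof(w));
	}

	for (i = 0; i < 20; i++)
		printf("%02x", out[i]);
	printf("\n");	/* 67452301efcdab89... - canonical byte order */
	return 0;
}

With the swap omitted, every 4-byte group of the exported SHA1 digest comes
out byte-reversed, and that incorrect hash is what the HMAC was then
computed over.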

2. The HMAC was calculated using the temporary hash value.
For single-request updates, the temporary hash was also the final one,
so the HMAC result was correct. But in fact only the final hash must be
used. All crypto tests for HMAC produce only a single request and
could not catch the problem. This problem is fixed here.
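
To see why single-request tests mask this bug, consider the following
self-contained toy sketch. toy_update is a made-up rolling function, not a
real hash, and the outer HMAC step H(key ^ opad || inner_hash) is reduced to
one more toy_update pass; only the final inner digest may feed that step.

#include <stdint.h>
#include <stdio.h>

/* Made-up rolling function standing in for the inner hash state;
 * not cryptographic, just stateful enough to distinguish a
 * temporary digest from the final one. */
static uint32_t toy_update(uint32_t state, const char *data)
{
	while (*data)
		state = state * 31u + (uint8_t)*data++;
	return state;
}

int main(void)
{
	/* Two-update request: the digest after the first update is
	 * only an intermediate value. */
	uint32_t temp   = toy_update(0, "hello ");
	uint32_t final  = toy_update(temp, "world");

	/* Single-update request: the "temporary" digest already equals
	 * the final one, which is why single-request HMAC tests pass. */
	uint32_t single = toy_update(0, "hello world");

	/* Outer HMAC step, reduced to one more pass over "opad". */
	printf("outer(temp):   %08x  <- wrong, built on intermediate hash\n",
	       toy_update(temp, "opad"));
	printf("outer(final):  %08x  <- correct\n",
	       toy_update(final, "opad"));
	printf("outer(single): %08x  <- equals outer(final)\n",
	       toy_update(single, "opad"));
	return 0;
}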

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Showing 1 changed file with 31 additions and 40 deletions

drivers/crypto/omap-sham.c
1 /* 1 /*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * Support for OMAP SHA1/MD5 HW acceleration. 4 * Support for OMAP SHA1/MD5 HW acceleration.
5 * 5 *
6 * Copyright (c) 2010 Nokia Corporation 6 * Copyright (c) 2010 Nokia Corporation
7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> 7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published 10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation. 11 * by the Free Software Foundation.
12 * 12 *
13 * Some ideas are from old omap-sha1-md5.c driver. 13 * Some ideas are from old omap-sha1-md5.c driver.
14 */ 14 */
15 15
16 #define pr_fmt(fmt) "%s: " fmt, __func__ 16 #define pr_fmt(fmt) "%s: " fmt, __func__
17 17
18 #include <linux/err.h> 18 #include <linux/err.h>
19 #include <linux/device.h> 19 #include <linux/device.h>
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/init.h> 21 #include <linux/init.h>
22 #include <linux/errno.h> 22 #include <linux/errno.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/kernel.h> 24 #include <linux/kernel.h>
25 #include <linux/clk.h> 25 #include <linux/clk.h>
26 #include <linux/irq.h> 26 #include <linux/irq.h>
27 #include <linux/io.h> 27 #include <linux/io.h>
28 #include <linux/platform_device.h> 28 #include <linux/platform_device.h>
29 #include <linux/scatterlist.h> 29 #include <linux/scatterlist.h>
30 #include <linux/dma-mapping.h> 30 #include <linux/dma-mapping.h>
31 #include <linux/delay.h> 31 #include <linux/delay.h>
32 #include <linux/crypto.h> 32 #include <linux/crypto.h>
33 #include <linux/cryptohash.h> 33 #include <linux/cryptohash.h>
34 #include <crypto/scatterwalk.h> 34 #include <crypto/scatterwalk.h>
35 #include <crypto/algapi.h> 35 #include <crypto/algapi.h>
36 #include <crypto/sha.h> 36 #include <crypto/sha.h>
37 #include <crypto/hash.h> 37 #include <crypto/hash.h>
38 #include <crypto/internal/hash.h> 38 #include <crypto/internal/hash.h>
39 39
40 #include <plat/cpu.h> 40 #include <plat/cpu.h>
41 #include <plat/dma.h> 41 #include <plat/dma.h>
42 #include <mach/irqs.h> 42 #include <mach/irqs.h>
43 43
44 #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04)) 44 #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
45 #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04)) 45 #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
46 46
47 #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE 47 #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
48 #define MD5_DIGEST_SIZE 16 48 #define MD5_DIGEST_SIZE 16
49 49
50 #define SHA_REG_DIGCNT 0x14 50 #define SHA_REG_DIGCNT 0x14
51 51
52 #define SHA_REG_CTRL 0x18 52 #define SHA_REG_CTRL 0x18
53 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) 53 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
54 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4) 54 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
55 #define SHA_REG_CTRL_ALGO_CONST (1 << 3) 55 #define SHA_REG_CTRL_ALGO_CONST (1 << 3)
56 #define SHA_REG_CTRL_ALGO (1 << 2) 56 #define SHA_REG_CTRL_ALGO (1 << 2)
57 #define SHA_REG_CTRL_INPUT_READY (1 << 1) 57 #define SHA_REG_CTRL_INPUT_READY (1 << 1)
58 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) 58 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
59 59
60 #define SHA_REG_REV 0x5C 60 #define SHA_REG_REV 0x5C
61 #define SHA_REG_REV_MAJOR 0xF0 61 #define SHA_REG_REV_MAJOR 0xF0
62 #define SHA_REG_REV_MINOR 0x0F 62 #define SHA_REG_REV_MINOR 0x0F
63 63
64 #define SHA_REG_MASK 0x60 64 #define SHA_REG_MASK 0x60
65 #define SHA_REG_MASK_DMA_EN (1 << 3) 65 #define SHA_REG_MASK_DMA_EN (1 << 3)
66 #define SHA_REG_MASK_IT_EN (1 << 2) 66 #define SHA_REG_MASK_IT_EN (1 << 2)
67 #define SHA_REG_MASK_SOFTRESET (1 << 1) 67 #define SHA_REG_MASK_SOFTRESET (1 << 1)
68 #define SHA_REG_AUTOIDLE (1 << 0) 68 #define SHA_REG_AUTOIDLE (1 << 0)
69 69
70 #define SHA_REG_SYSSTATUS 0x64 70 #define SHA_REG_SYSSTATUS 0x64
71 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) 71 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
72 72
73 #define DEFAULT_TIMEOUT_INTERVAL HZ 73 #define DEFAULT_TIMEOUT_INTERVAL HZ
74 74
75 #define FLAGS_FINUP 0x0002 75 #define FLAGS_FINUP 0x0002
76 #define FLAGS_FINAL 0x0004 76 #define FLAGS_FINAL 0x0004
77 #define FLAGS_SG 0x0008 77 #define FLAGS_SG 0x0008
78 #define FLAGS_SHA1 0x0010 78 #define FLAGS_SHA1 0x0010
79 #define FLAGS_DMA_ACTIVE 0x0020 79 #define FLAGS_DMA_ACTIVE 0x0020
80 #define FLAGS_OUTPUT_READY 0x0040 80 #define FLAGS_OUTPUT_READY 0x0040
81 #define FLAGS_CLEAN 0x0080
82 #define FLAGS_INIT 0x0100 81 #define FLAGS_INIT 0x0100
83 #define FLAGS_CPU 0x0200 82 #define FLAGS_CPU 0x0200
84 #define FLAGS_HMAC 0x0400 83 #define FLAGS_HMAC 0x0400
85 #define FLAGS_ERROR 0x0800 84 #define FLAGS_ERROR 0x0800
86 #define FLAGS_BUSY 0x1000 85 #define FLAGS_BUSY 0x1000
87 86
88 #define OP_UPDATE 1 87 #define OP_UPDATE 1
89 #define OP_FINAL 2 88 #define OP_FINAL 2
90 89
91 #define OMAP_ALIGN_MASK (sizeof(u32)-1) 90 #define OMAP_ALIGN_MASK (sizeof(u32)-1)
92 #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) 91 #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
93 92
94 #define BUFLEN PAGE_SIZE 93 #define BUFLEN PAGE_SIZE
95 94
96 struct omap_sham_dev; 95 struct omap_sham_dev;
97 96
98 struct omap_sham_reqctx { 97 struct omap_sham_reqctx {
99 struct omap_sham_dev *dd; 98 struct omap_sham_dev *dd;
100 unsigned long flags; 99 unsigned long flags;
101 unsigned long op; 100 unsigned long op;
102 101
103 u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED; 102 u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
104 size_t digcnt; 103 size_t digcnt;
105 size_t bufcnt; 104 size_t bufcnt;
106 size_t buflen; 105 size_t buflen;
107 dma_addr_t dma_addr; 106 dma_addr_t dma_addr;
108 107
109 /* walk state */ 108 /* walk state */
110 struct scatterlist *sg; 109 struct scatterlist *sg;
111 unsigned int offset; /* offset in current sg */ 110 unsigned int offset; /* offset in current sg */
112 unsigned int total; /* total request */ 111 unsigned int total; /* total request */
113 112
114 u8 buffer[0] OMAP_ALIGNED; 113 u8 buffer[0] OMAP_ALIGNED;
115 }; 114 };
116 115
117 struct omap_sham_hmac_ctx { 116 struct omap_sham_hmac_ctx {
118 struct crypto_shash *shash; 117 struct crypto_shash *shash;
119 u8 ipad[SHA1_MD5_BLOCK_SIZE]; 118 u8 ipad[SHA1_MD5_BLOCK_SIZE];
120 u8 opad[SHA1_MD5_BLOCK_SIZE]; 119 u8 opad[SHA1_MD5_BLOCK_SIZE];
121 }; 120 };
122 121
123 struct omap_sham_ctx { 122 struct omap_sham_ctx {
124 struct omap_sham_dev *dd; 123 struct omap_sham_dev *dd;
125 124
126 unsigned long flags; 125 unsigned long flags;
127 126
128 /* fallback stuff */ 127 /* fallback stuff */
129 struct crypto_shash *fallback; 128 struct crypto_shash *fallback;
130 129
131 struct omap_sham_hmac_ctx base[0]; 130 struct omap_sham_hmac_ctx base[0];
132 }; 131 };
133 132
134 #define OMAP_SHAM_QUEUE_LENGTH 1 133 #define OMAP_SHAM_QUEUE_LENGTH 1
135 134
136 struct omap_sham_dev { 135 struct omap_sham_dev {
137 struct list_head list; 136 struct list_head list;
138 unsigned long phys_base; 137 unsigned long phys_base;
139 struct device *dev; 138 struct device *dev;
140 void __iomem *io_base; 139 void __iomem *io_base;
141 int irq; 140 int irq;
142 struct clk *iclk; 141 struct clk *iclk;
143 spinlock_t lock; 142 spinlock_t lock;
144 int err; 143 int err;
145 int dma; 144 int dma;
146 int dma_lch; 145 int dma_lch;
147 struct tasklet_struct done_task; 146 struct tasklet_struct done_task;
148 struct tasklet_struct queue_task; 147 struct tasklet_struct queue_task;
149 148
150 unsigned long flags; 149 unsigned long flags;
151 struct crypto_queue queue; 150 struct crypto_queue queue;
152 struct ahash_request *req; 151 struct ahash_request *req;
153 }; 152 };
154 153
155 struct omap_sham_drv { 154 struct omap_sham_drv {
156 struct list_head dev_list; 155 struct list_head dev_list;
157 spinlock_t lock; 156 spinlock_t lock;
158 unsigned long flags; 157 unsigned long flags;
159 }; 158 };
160 159
161 static struct omap_sham_drv sham = { 160 static struct omap_sham_drv sham = {
162 .dev_list = LIST_HEAD_INIT(sham.dev_list), 161 .dev_list = LIST_HEAD_INIT(sham.dev_list),
163 .lock = __SPIN_LOCK_UNLOCKED(sham.lock), 162 .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
164 }; 163 };
165 164
166 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) 165 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
167 { 166 {
168 return __raw_readl(dd->io_base + offset); 167 return __raw_readl(dd->io_base + offset);
169 } 168 }
170 169
171 static inline void omap_sham_write(struct omap_sham_dev *dd, 170 static inline void omap_sham_write(struct omap_sham_dev *dd,
172 u32 offset, u32 value) 171 u32 offset, u32 value)
173 { 172 {
174 __raw_writel(value, dd->io_base + offset); 173 __raw_writel(value, dd->io_base + offset);
175 } 174 }
176 175
177 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, 176 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
178 u32 value, u32 mask) 177 u32 value, u32 mask)
179 { 178 {
180 u32 val; 179 u32 val;
181 180
182 val = omap_sham_read(dd, address); 181 val = omap_sham_read(dd, address);
183 val &= ~mask; 182 val &= ~mask;
184 val |= value; 183 val |= value;
185 omap_sham_write(dd, address, val); 184 omap_sham_write(dd, address, val);
186 } 185 }
187 186
188 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) 187 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
189 { 188 {
190 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL; 189 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
191 190
192 while (!(omap_sham_read(dd, offset) & bit)) { 191 while (!(omap_sham_read(dd, offset) & bit)) {
193 if (time_is_before_jiffies(timeout)) 192 if (time_is_before_jiffies(timeout))
194 return -ETIMEDOUT; 193 return -ETIMEDOUT;
195 } 194 }
196 195
197 return 0; 196 return 0;
198 } 197 }
199 198
200 static void omap_sham_copy_hash(struct ahash_request *req, int out) 199 static void omap_sham_copy_hash(struct ahash_request *req, int out)
201 { 200 {
202 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 201 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
203 u32 *hash = (u32 *)ctx->digest; 202 u32 *hash = (u32 *)ctx->digest;
204 int i; 203 int i;
205 204
206 /* MD5 is almost unused. So copy sha1 size to reduce code */ 205 /* MD5 is almost unused. So copy sha1 size to reduce code */
207 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { 206 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
208 if (out) 207 if (out)
209 hash[i] = omap_sham_read(ctx->dd, 208 hash[i] = omap_sham_read(ctx->dd,
210 SHA_REG_DIGEST(i)); 209 SHA_REG_DIGEST(i));
211 else 210 else
212 omap_sham_write(ctx->dd, 211 omap_sham_write(ctx->dd,
213 SHA_REG_DIGEST(i), hash[i]); 212 SHA_REG_DIGEST(i), hash[i]);
214 } 213 }
215 } 214 }
216 215
217 static void omap_sham_copy_ready_hash(struct ahash_request *req) 216 static void omap_sham_copy_ready_hash(struct ahash_request *req)
218 { 217 {
219 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 218 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
220 u32 *in = (u32 *)ctx->digest; 219 u32 *in = (u32 *)ctx->digest;
221 u32 *hash = (u32 *)req->result; 220 u32 *hash = (u32 *)req->result;
222 int i; 221 int i;
223 222
224 if (!hash) 223 if (!hash)
225 return; 224 return;
226 225
227 if (likely(ctx->flags & FLAGS_SHA1)) { 226 if (likely(ctx->flags & FLAGS_SHA1)) {
228 /* SHA1 results are in big endian */ 227 /* SHA1 results are in big endian */
229 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) 228 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
230 hash[i] = be32_to_cpu(in[i]); 229 hash[i] = be32_to_cpu(in[i]);
231 } else { 230 } else {
232 /* MD5 results are in little endian */ 231 /* MD5 results are in little endian */
233 for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) 232 for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
234 hash[i] = le32_to_cpu(in[i]); 233 hash[i] = le32_to_cpu(in[i]);
235 } 234 }
236 } 235 }
237 236
238 static int omap_sham_hw_init(struct omap_sham_dev *dd) 237 static int omap_sham_hw_init(struct omap_sham_dev *dd)
239 { 238 {
240 clk_enable(dd->iclk); 239 clk_enable(dd->iclk);
241 240
242 if (!(dd->flags & FLAGS_INIT)) { 241 if (!(dd->flags & FLAGS_INIT)) {
243 omap_sham_write_mask(dd, SHA_REG_MASK, 242 omap_sham_write_mask(dd, SHA_REG_MASK,
244 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); 243 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
245 244
246 if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, 245 if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
247 SHA_REG_SYSSTATUS_RESETDONE)) 246 SHA_REG_SYSSTATUS_RESETDONE))
248 return -ETIMEDOUT; 247 return -ETIMEDOUT;
249 248
250 dd->flags |= FLAGS_INIT; 249 dd->flags |= FLAGS_INIT;
251 dd->err = 0; 250 dd->err = 0;
252 } 251 }
253 252
254 return 0; 253 return 0;
255 } 254 }
256 255
257 static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, 256 static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
258 int final, int dma) 257 int final, int dma)
259 { 258 {
260 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 259 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
261 u32 val = length << 5, mask; 260 u32 val = length << 5, mask;
262 261
263 if (likely(ctx->digcnt)) 262 if (likely(ctx->digcnt))
264 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); 263 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
265 264
266 omap_sham_write_mask(dd, SHA_REG_MASK, 265 omap_sham_write_mask(dd, SHA_REG_MASK,
267 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), 266 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
268 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); 267 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
269 /* 268 /*
270 * Setting ALGO_CONST only for the first iteration 269 * Setting ALGO_CONST only for the first iteration
271 * and CLOSE_HASH only for the last one. 270 * and CLOSE_HASH only for the last one.
272 */ 271 */
273 if (ctx->flags & FLAGS_SHA1) 272 if (ctx->flags & FLAGS_SHA1)
274 val |= SHA_REG_CTRL_ALGO; 273 val |= SHA_REG_CTRL_ALGO;
275 if (!ctx->digcnt) 274 if (!ctx->digcnt)
276 val |= SHA_REG_CTRL_ALGO_CONST; 275 val |= SHA_REG_CTRL_ALGO_CONST;
277 if (final) 276 if (final)
278 val |= SHA_REG_CTRL_CLOSE_HASH; 277 val |= SHA_REG_CTRL_CLOSE_HASH;
279 278
280 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH | 279 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
281 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; 280 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
282 281
283 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); 282 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
284 } 283 }
285 284
286 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, 285 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
287 size_t length, int final) 286 size_t length, int final)
288 { 287 {
289 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 288 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
290 int count, len32; 289 int count, len32;
291 const u32 *buffer = (const u32 *)buf; 290 const u32 *buffer = (const u32 *)buf;
292 291
293 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", 292 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
294 ctx->digcnt, length, final); 293 ctx->digcnt, length, final);
295 294
296 omap_sham_write_ctrl(dd, length, final, 0); 295 omap_sham_write_ctrl(dd, length, final, 0);
297 296
298 /* should be non-zero before next lines to disable clocks later */ 297 /* should be non-zero before next lines to disable clocks later */
299 ctx->digcnt += length; 298 ctx->digcnt += length;
300 299
301 if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) 300 if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
302 return -ETIMEDOUT; 301 return -ETIMEDOUT;
303 302
304 if (final) 303 if (final)
305 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ 304 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
306 305
307 len32 = DIV_ROUND_UP(length, sizeof(u32)); 306 len32 = DIV_ROUND_UP(length, sizeof(u32));
308 307
309 for (count = 0; count < len32; count++) 308 for (count = 0; count < len32; count++)
310 omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]); 309 omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
311 310
312 return -EINPROGRESS; 311 return -EINPROGRESS;
313 } 312 }
314 313
315 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, 314 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
316 size_t length, int final) 315 size_t length, int final)
317 { 316 {
318 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 317 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
319 int len32; 318 int len32;
320 319
321 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", 320 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
322 ctx->digcnt, length, final); 321 ctx->digcnt, length, final);
323 322
324 len32 = DIV_ROUND_UP(length, sizeof(u32)); 323 len32 = DIV_ROUND_UP(length, sizeof(u32));
325 324
326 omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, 325 omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
327 1, OMAP_DMA_SYNC_PACKET, dd->dma, 326 1, OMAP_DMA_SYNC_PACKET, dd->dma,
328 OMAP_DMA_DST_SYNC_PREFETCH); 327 OMAP_DMA_DST_SYNC_PREFETCH);
329 328
330 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, 329 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
331 dma_addr, 0, 0); 330 dma_addr, 0, 0);
332 331
333 omap_sham_write_ctrl(dd, length, final, 1); 332 omap_sham_write_ctrl(dd, length, final, 1);
334 333
335 ctx->digcnt += length; 334 ctx->digcnt += length;
336 335
337 if (final) 336 if (final)
338 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ 337 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
339 338
340 dd->flags |= FLAGS_DMA_ACTIVE; 339 dd->flags |= FLAGS_DMA_ACTIVE;
341 340
342 omap_start_dma(dd->dma_lch); 341 omap_start_dma(dd->dma_lch);
343 342
344 return -EINPROGRESS; 343 return -EINPROGRESS;
345 } 344 }
346 345
347 static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, 346 static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
348 const u8 *data, size_t length) 347 const u8 *data, size_t length)
349 { 348 {
350 size_t count = min(length, ctx->buflen - ctx->bufcnt); 349 size_t count = min(length, ctx->buflen - ctx->bufcnt);
351 350
352 count = min(count, ctx->total); 351 count = min(count, ctx->total);
353 if (count <= 0) 352 if (count <= 0)
354 return 0; 353 return 0;
355 memcpy(ctx->buffer + ctx->bufcnt, data, count); 354 memcpy(ctx->buffer + ctx->bufcnt, data, count);
356 ctx->bufcnt += count; 355 ctx->bufcnt += count;
357 356
358 return count; 357 return count;
359 } 358 }
360 359
361 static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) 360 static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
362 { 361 {
363 size_t count; 362 size_t count;
364 363
365 while (ctx->sg) { 364 while (ctx->sg) {
366 count = omap_sham_append_buffer(ctx, 365 count = omap_sham_append_buffer(ctx,
367 sg_virt(ctx->sg) + ctx->offset, 366 sg_virt(ctx->sg) + ctx->offset,
368 ctx->sg->length - ctx->offset); 367 ctx->sg->length - ctx->offset);
369 if (!count) 368 if (!count)
370 break; 369 break;
371 ctx->offset += count; 370 ctx->offset += count;
372 ctx->total -= count; 371 ctx->total -= count;
373 if (ctx->offset == ctx->sg->length) { 372 if (ctx->offset == ctx->sg->length) {
374 ctx->sg = sg_next(ctx->sg); 373 ctx->sg = sg_next(ctx->sg);
375 if (ctx->sg) 374 if (ctx->sg)
376 ctx->offset = 0; 375 ctx->offset = 0;
377 else 376 else
378 ctx->total = 0; 377 ctx->total = 0;
379 } 378 }
380 } 379 }
381 380
382 return 0; 381 return 0;
383 } 382 }
384 383
385 static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, 384 static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
386 struct omap_sham_reqctx *ctx, 385 struct omap_sham_reqctx *ctx,
387 size_t length, int final) 386 size_t length, int final)
388 { 387 {
389 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, 388 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
390 DMA_TO_DEVICE); 389 DMA_TO_DEVICE);
391 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { 390 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
392 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); 391 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
393 return -EINVAL; 392 return -EINVAL;
394 } 393 }
395 394
396 ctx->flags &= ~FLAGS_SG; 395 ctx->flags &= ~FLAGS_SG;
397 396
398 /* next call does not fail... so no unmap in the case of error */ 397 /* next call does not fail... so no unmap in the case of error */
399 return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); 398 return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
400 } 399 }
401 400
402 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) 401 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
403 { 402 {
404 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 403 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
405 unsigned int final; 404 unsigned int final;
406 size_t count; 405 size_t count;
407 406
408 omap_sham_append_sg(ctx); 407 omap_sham_append_sg(ctx);
409 408
410 final = (ctx->flags & FLAGS_FINUP) && !ctx->total; 409 final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
411 410
412 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", 411 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
413 ctx->bufcnt, ctx->digcnt, final); 412 ctx->bufcnt, ctx->digcnt, final);
414 413
415 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { 414 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
416 count = ctx->bufcnt; 415 count = ctx->bufcnt;
417 ctx->bufcnt = 0; 416 ctx->bufcnt = 0;
418 return omap_sham_xmit_dma_map(dd, ctx, count, final); 417 return omap_sham_xmit_dma_map(dd, ctx, count, final);
419 } 418 }
420 419
421 return 0; 420 return 0;
422 } 421 }
423 422
424 /* Start address alignment */ 423 /* Start address alignment */
425 #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) 424 #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
426 /* SHA1 block size alignment */ 425 /* SHA1 block size alignment */
427 #define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) 426 #define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
428 427
429 static int omap_sham_update_dma_start(struct omap_sham_dev *dd) 428 static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
430 { 429 {
431 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 430 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
432 unsigned int length, final, tail; 431 unsigned int length, final, tail;
433 struct scatterlist *sg; 432 struct scatterlist *sg;
434 433
435 if (!ctx->total) 434 if (!ctx->total)
436 return 0; 435 return 0;
437 436
438 if (ctx->bufcnt || ctx->offset) 437 if (ctx->bufcnt || ctx->offset)
439 return omap_sham_update_dma_slow(dd); 438 return omap_sham_update_dma_slow(dd);
440 439
441 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", 440 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
442 ctx->digcnt, ctx->bufcnt, ctx->total); 441 ctx->digcnt, ctx->bufcnt, ctx->total);
443 442
444 sg = ctx->sg; 443 sg = ctx->sg;
445 444
446 if (!SG_AA(sg)) 445 if (!SG_AA(sg))
447 return omap_sham_update_dma_slow(dd); 446 return omap_sham_update_dma_slow(dd);
448 447
449 if (!sg_is_last(sg) && !SG_SA(sg)) 448 if (!sg_is_last(sg) && !SG_SA(sg))
450 /* size is not SHA1_BLOCK_SIZE aligned */ 449 /* size is not SHA1_BLOCK_SIZE aligned */
451 return omap_sham_update_dma_slow(dd); 450 return omap_sham_update_dma_slow(dd);
452 451
453 length = min(ctx->total, sg->length); 452 length = min(ctx->total, sg->length);
454 453
455 if (sg_is_last(sg)) { 454 if (sg_is_last(sg)) {
456 if (!(ctx->flags & FLAGS_FINUP)) { 455 if (!(ctx->flags & FLAGS_FINUP)) {
457 /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ 456 /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
458 tail = length & (SHA1_MD5_BLOCK_SIZE - 1); 457 tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
459 /* without finup() we need one block to close hash */ 458 /* without finup() we need one block to close hash */
460 if (!tail) 459 if (!tail)
461 tail = SHA1_MD5_BLOCK_SIZE; 460 tail = SHA1_MD5_BLOCK_SIZE;
462 length -= tail; 461 length -= tail;
463 } 462 }
464 } 463 }
465 464
466 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { 465 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
467 dev_err(dd->dev, "dma_map_sg error\n"); 466 dev_err(dd->dev, "dma_map_sg error\n");
468 return -EINVAL; 467 return -EINVAL;
469 } 468 }
470 469
471 ctx->flags |= FLAGS_SG; 470 ctx->flags |= FLAGS_SG;
472 471
473 ctx->total -= length; 472 ctx->total -= length;
474 ctx->offset = length; /* offset where to start slow */ 473 ctx->offset = length; /* offset where to start slow */
475 474
476 final = (ctx->flags & FLAGS_FINUP) && !ctx->total; 475 final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
477 476
478 /* next call does not fail... so no unmap in the case of error */ 477 /* next call does not fail... so no unmap in the case of error */
479 return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); 478 return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
480 } 479 }
481 480
482 static int omap_sham_update_cpu(struct omap_sham_dev *dd) 481 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
483 { 482 {
484 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 483 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
485 int bufcnt; 484 int bufcnt;
486 485
487 omap_sham_append_sg(ctx); 486 omap_sham_append_sg(ctx);
488 bufcnt = ctx->bufcnt; 487 bufcnt = ctx->bufcnt;
489 ctx->bufcnt = 0; 488 ctx->bufcnt = 0;
490 489
491 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); 490 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
492 } 491 }
493 492
494 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) 493 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
495 { 494 {
496 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 495 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
497 496
498 omap_stop_dma(dd->dma_lch); 497 omap_stop_dma(dd->dma_lch);
499 if (ctx->flags & FLAGS_SG) { 498 if (ctx->flags & FLAGS_SG) {
500 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); 499 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
501 if (ctx->sg->length == ctx->offset) { 500 if (ctx->sg->length == ctx->offset) {
502 ctx->sg = sg_next(ctx->sg); 501 ctx->sg = sg_next(ctx->sg);
503 if (ctx->sg) 502 if (ctx->sg)
504 ctx->offset = 0; 503 ctx->offset = 0;
505 } 504 }
506 } else { 505 } else {
507 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, 506 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
508 DMA_TO_DEVICE); 507 DMA_TO_DEVICE);
509 } 508 }
510 509
511 return 0; 510 return 0;
512 } 511 }
513 512
514 static void omap_sham_cleanup(struct ahash_request *req)
515 {
516 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
517 struct omap_sham_dev *dd = ctx->dd;
518 unsigned long flags;
519
520 spin_lock_irqsave(&dd->lock, flags);
521 if (ctx->flags & FLAGS_CLEAN) {
522 spin_unlock_irqrestore(&dd->lock, flags);
523 return;
524 }
525 ctx->flags |= FLAGS_CLEAN;
526 spin_unlock_irqrestore(&dd->lock, flags);
527
528 if (ctx->digcnt)
529 omap_sham_copy_ready_hash(req);
530
531 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
532 }
533
534 static int omap_sham_init(struct ahash_request *req) 513 static int omap_sham_init(struct ahash_request *req)
535 { 514 {
536 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 515 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
537 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 516 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
538 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 517 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
539 struct omap_sham_dev *dd = NULL, *tmp; 518 struct omap_sham_dev *dd = NULL, *tmp;
540 519
541 spin_lock_bh(&sham.lock); 520 spin_lock_bh(&sham.lock);
542 if (!tctx->dd) { 521 if (!tctx->dd) {
543 list_for_each_entry(tmp, &sham.dev_list, list) { 522 list_for_each_entry(tmp, &sham.dev_list, list) {
544 dd = tmp; 523 dd = tmp;
545 break; 524 break;
546 } 525 }
547 tctx->dd = dd; 526 tctx->dd = dd;
548 } else { 527 } else {
549 dd = tctx->dd; 528 dd = tctx->dd;
550 } 529 }
551 spin_unlock_bh(&sham.lock); 530 spin_unlock_bh(&sham.lock);
552 531
553 ctx->dd = dd; 532 ctx->dd = dd;
554 533
555 ctx->flags = 0; 534 ctx->flags = 0;
556 535
557 dev_dbg(dd->dev, "init: digest size: %d\n", 536 dev_dbg(dd->dev, "init: digest size: %d\n",
558 crypto_ahash_digestsize(tfm)); 537 crypto_ahash_digestsize(tfm));
559 538
560 if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) 539 if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
561 ctx->flags |= FLAGS_SHA1; 540 ctx->flags |= FLAGS_SHA1;
562 541
563 ctx->bufcnt = 0; 542 ctx->bufcnt = 0;
564 ctx->digcnt = 0; 543 ctx->digcnt = 0;
565 ctx->buflen = BUFLEN; 544 ctx->buflen = BUFLEN;
566 545
567 if (tctx->flags & FLAGS_HMAC) { 546 if (tctx->flags & FLAGS_HMAC) {
568 struct omap_sham_hmac_ctx *bctx = tctx->base; 547 struct omap_sham_hmac_ctx *bctx = tctx->base;
569 548
570 memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); 549 memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
571 ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; 550 ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
572 ctx->flags |= FLAGS_HMAC; 551 ctx->flags |= FLAGS_HMAC;
573 } 552 }
574 553
575 return 0; 554 return 0;
576 555
577 } 556 }
578 557
579 static int omap_sham_update_req(struct omap_sham_dev *dd) 558 static int omap_sham_update_req(struct omap_sham_dev *dd)
580 { 559 {
581 struct ahash_request *req = dd->req; 560 struct ahash_request *req = dd->req;
582 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 561 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
583 int err; 562 int err;
584 563
585 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", 564 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
586 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0); 565 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
587 566
588 if (ctx->flags & FLAGS_CPU) 567 if (ctx->flags & FLAGS_CPU)
589 err = omap_sham_update_cpu(dd); 568 err = omap_sham_update_cpu(dd);
590 else 569 else
591 err = omap_sham_update_dma_start(dd); 570 err = omap_sham_update_dma_start(dd);
592 571
593 /* wait for dma completion before can take more data */ 572 /* wait for dma completion before can take more data */
594 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); 573 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
595 574
596 return err; 575 return err;
597 } 576 }
598 577
599 static int omap_sham_final_req(struct omap_sham_dev *dd) 578 static int omap_sham_final_req(struct omap_sham_dev *dd)
600 { 579 {
601 struct ahash_request *req = dd->req; 580 struct ahash_request *req = dd->req;
602 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 581 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
603 int err = 0, use_dma = 1; 582 int err = 0, use_dma = 1;
604 583
605 if (ctx->bufcnt <= 64) 584 if (ctx->bufcnt <= 64)
606 /* faster to handle last block with cpu */ 585 /* faster to handle last block with cpu */
607 use_dma = 0; 586 use_dma = 0;
608 587
609 if (use_dma) 588 if (use_dma)
610 err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); 589 err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
611 else 590 else
612 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); 591 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
613 592
614 ctx->bufcnt = 0; 593 ctx->bufcnt = 0;
615 594
616 dev_dbg(dd->dev, "final_req: err: %d\n", err); 595 dev_dbg(dd->dev, "final_req: err: %d\n", err);
617 596
618 return err; 597 return err;
619 } 598 }
620 599
621 static int omap_sham_finish_req_hmac(struct ahash_request *req) 600 static int omap_sham_finish_hmac(struct ahash_request *req)
622 { 601 {
623 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
624 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 602 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
625 struct omap_sham_hmac_ctx *bctx = tctx->base; 603 struct omap_sham_hmac_ctx *bctx = tctx->base;
626 int bs = crypto_shash_blocksize(bctx->shash); 604 int bs = crypto_shash_blocksize(bctx->shash);
627 int ds = crypto_shash_digestsize(bctx->shash); 605 int ds = crypto_shash_digestsize(bctx->shash);
628 struct { 606 struct {
629 struct shash_desc shash; 607 struct shash_desc shash;
630 char ctx[crypto_shash_descsize(bctx->shash)]; 608 char ctx[crypto_shash_descsize(bctx->shash)];
631 } desc; 609 } desc;
632 610
633 desc.shash.tfm = bctx->shash; 611 desc.shash.tfm = bctx->shash;
634 desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ 612 desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
635 613
636 return crypto_shash_init(&desc.shash) ?: 614 return crypto_shash_init(&desc.shash) ?:
637 crypto_shash_update(&desc.shash, bctx->opad, bs) ?: 615 crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
638 crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest); 616 crypto_shash_finup(&desc.shash, req->result, ds, req->result);
639 } 617 }
640 618
619 static int omap_sham_finish(struct ahash_request *req)
620 {
621 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
622 struct omap_sham_dev *dd = ctx->dd;
623 int err = 0;
624
625 if (ctx->digcnt) {
626 omap_sham_copy_ready_hash(req);
627 if (ctx->flags & FLAGS_HMAC)
628 err = omap_sham_finish_hmac(req);
629 }
630
631 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
632
633 return err;
634 }
635
641 static void omap_sham_finish_req(struct ahash_request *req, int err) 636 static void omap_sham_finish_req(struct ahash_request *req, int err)
642 { 637 {
643 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 638 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
644 struct omap_sham_dev *dd = ctx->dd; 639 struct omap_sham_dev *dd = ctx->dd;
645 640
646 if (!err) { 641 if (!err) {
647 omap_sham_copy_hash(ctx->dd->req, 1); 642 omap_sham_copy_hash(ctx->dd->req, 1);
648 if (ctx->flags & FLAGS_HMAC) 643 if (ctx->flags & FLAGS_FINAL)
649 err = omap_sham_finish_req_hmac(req); 644 err = omap_sham_finish(req);
650 } else { 645 } else {
651 ctx->flags |= FLAGS_ERROR; 646 ctx->flags |= FLAGS_ERROR;
652 } 647 }
653 648
654 if ((ctx->flags & FLAGS_FINAL) || err)
655 omap_sham_cleanup(req);
656
657 clk_disable(dd->iclk); 649 clk_disable(dd->iclk);
658 dd->flags &= ~FLAGS_BUSY; 650 dd->flags &= ~FLAGS_BUSY;
659 651
660 if (req->base.complete) 652 if (req->base.complete)
661 req->base.complete(&req->base, err); 653 req->base.complete(&req->base, err);
662 } 654 }
663 655
664 static int omap_sham_handle_queue(struct omap_sham_dev *dd, 656 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
665 struct ahash_request *req) 657 struct ahash_request *req)
666 { 658 {
667 struct crypto_async_request *async_req, *backlog; 659 struct crypto_async_request *async_req, *backlog;
668 struct omap_sham_reqctx *ctx; 660 struct omap_sham_reqctx *ctx;
669 struct ahash_request *prev_req; 661 struct ahash_request *prev_req;
670 unsigned long flags; 662 unsigned long flags;
671 int err = 0, ret = 0; 663 int err = 0, ret = 0;
672 664
673 spin_lock_irqsave(&dd->lock, flags); 665 spin_lock_irqsave(&dd->lock, flags);
674 if (req) 666 if (req)
675 ret = ahash_enqueue_request(&dd->queue, req); 667 ret = ahash_enqueue_request(&dd->queue, req);
676 if (dd->flags & FLAGS_BUSY) { 668 if (dd->flags & FLAGS_BUSY) {
677 spin_unlock_irqrestore(&dd->lock, flags); 669 spin_unlock_irqrestore(&dd->lock, flags);
678 return ret; 670 return ret;
679 } 671 }
680 backlog = crypto_get_backlog(&dd->queue); 672 backlog = crypto_get_backlog(&dd->queue);
681 async_req = crypto_dequeue_request(&dd->queue); 673 async_req = crypto_dequeue_request(&dd->queue);
682 if (async_req) 674 if (async_req)
683 dd->flags |= FLAGS_BUSY; 675 dd->flags |= FLAGS_BUSY;
684 spin_unlock_irqrestore(&dd->lock, flags); 676 spin_unlock_irqrestore(&dd->lock, flags);
685 677
686 if (!async_req) 678 if (!async_req)
687 return ret; 679 return ret;
688 680
689 if (backlog) 681 if (backlog)
690 backlog->complete(backlog, -EINPROGRESS); 682 backlog->complete(backlog, -EINPROGRESS);
691 683
692 req = ahash_request_cast(async_req); 684 req = ahash_request_cast(async_req);
693 685
694 prev_req = dd->req; 686 prev_req = dd->req;
695 dd->req = req; 687 dd->req = req;
696 688
697 ctx = ahash_request_ctx(req); 689 ctx = ahash_request_ctx(req);
698 690
699 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", 691 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
700 ctx->op, req->nbytes); 692 ctx->op, req->nbytes);
701 693
702 694
703 err = omap_sham_hw_init(dd); 695 err = omap_sham_hw_init(dd);
704 if (err) 696 if (err)
705 goto err1; 697 goto err1;
706 698
707 omap_set_dma_dest_params(dd->dma_lch, 0, 699 omap_set_dma_dest_params(dd->dma_lch, 0,
708 OMAP_DMA_AMODE_CONSTANT, 700 OMAP_DMA_AMODE_CONSTANT,
709 dd->phys_base + SHA_REG_DIN(0), 0, 16); 701 dd->phys_base + SHA_REG_DIN(0), 0, 16);
710 702
711 omap_set_dma_dest_burst_mode(dd->dma_lch, 703 omap_set_dma_dest_burst_mode(dd->dma_lch,
712 OMAP_DMA_DATA_BURST_16); 704 OMAP_DMA_DATA_BURST_16);
713 705
714 omap_set_dma_src_burst_mode(dd->dma_lch, 706 omap_set_dma_src_burst_mode(dd->dma_lch,
715 OMAP_DMA_DATA_BURST_4); 707 OMAP_DMA_DATA_BURST_4);
716 708
717 if (ctx->digcnt) 709 if (ctx->digcnt)
718 /* request has changed - restore hash */ 710 /* request has changed - restore hash */
719 omap_sham_copy_hash(req, 0); 711 omap_sham_copy_hash(req, 0);
720 712
721 if (ctx->op == OP_UPDATE) { 713 if (ctx->op == OP_UPDATE) {
722 err = omap_sham_update_req(dd); 714 err = omap_sham_update_req(dd);
723 if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP)) 715 if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
724 /* no final() after finup() */ 716 /* no final() after finup() */
725 err = omap_sham_final_req(dd); 717 err = omap_sham_final_req(dd);
726 } else if (ctx->op == OP_FINAL) { 718 } else if (ctx->op == OP_FINAL) {
727 err = omap_sham_final_req(dd); 719 err = omap_sham_final_req(dd);
728 } 720 }
729 err1: 721 err1:
730 if (err != -EINPROGRESS) { 722 if (err != -EINPROGRESS) {
731 /* done_task will not finish it, so do it here */ 723 /* done_task will not finish it, so do it here */
732 omap_sham_finish_req(req, err); 724 omap_sham_finish_req(req, err);
733 tasklet_schedule(&dd->queue_task); 725 tasklet_schedule(&dd->queue_task);
734 } 726 }
735 727
736 dev_dbg(dd->dev, "exit, err: %d\n", err); 728 dev_dbg(dd->dev, "exit, err: %d\n", err);
737 729
738 return ret; 730 return ret;
739 } 731 }
740 732
741 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) 733 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
742 { 734 {
743 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 735 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
744 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 736 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
745 struct omap_sham_dev *dd = tctx->dd; 737 struct omap_sham_dev *dd = tctx->dd;
746 738
747 ctx->op = op; 739 ctx->op = op;
748 740
749 return omap_sham_handle_queue(dd, req); 741 return omap_sham_handle_queue(dd, req);
750 } 742 }
751 743
752 static int omap_sham_update(struct ahash_request *req) 744 static int omap_sham_update(struct ahash_request *req)
753 { 745 {
754 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 746 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
755 747
756 if (!req->nbytes) 748 if (!req->nbytes)
757 return 0; 749 return 0;
758 750
759 ctx->total = req->nbytes; 751 ctx->total = req->nbytes;
760 ctx->sg = req->src; 752 ctx->sg = req->src;
761 ctx->offset = 0; 753 ctx->offset = 0;
762 754
763 if (ctx->flags & FLAGS_FINUP) { 755 if (ctx->flags & FLAGS_FINUP) {
764 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { 756 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
765 /* 757 /*
766 * OMAP HW accel works only with buffers >= 9 758 * OMAP HW accel works only with buffers >= 9
767 * will switch to bypass in final() 759 * will switch to bypass in final()
768 * final has the same request and data 760 * final has the same request and data
769 */ 761 */
770 omap_sham_append_sg(ctx); 762 omap_sham_append_sg(ctx);
771 return 0; 763 return 0;
772 } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { 764 } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
773 /* 765 /*
774 * faster to use CPU for short transfers 766 * faster to use CPU for short transfers
775 */ 767 */
776 ctx->flags |= FLAGS_CPU; 768 ctx->flags |= FLAGS_CPU;
777 } 769 }
778 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { 770 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
779 omap_sham_append_sg(ctx); 771 omap_sham_append_sg(ctx);
780 return 0; 772 return 0;
781 } 773 }
782 774
783 return omap_sham_enqueue(req, OP_UPDATE); 775 return omap_sham_enqueue(req, OP_UPDATE);
784 } 776 }
785 777
786 static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags, 778 static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
787 const u8 *data, unsigned int len, u8 *out) 779 const u8 *data, unsigned int len, u8 *out)
788 { 780 {
789 struct { 781 struct {
790 struct shash_desc shash; 782 struct shash_desc shash;
791 char ctx[crypto_shash_descsize(shash)]; 783 char ctx[crypto_shash_descsize(shash)];
792 } desc; 784 } desc;
793 785
794 desc.shash.tfm = shash; 786 desc.shash.tfm = shash;
795 desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP; 787 desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
796 788
797 return crypto_shash_digest(&desc.shash, data, len, out); 789 return crypto_shash_digest(&desc.shash, data, len, out);
798 } 790 }
799 791
800 static int omap_sham_final_shash(struct ahash_request *req) 792 static int omap_sham_final_shash(struct ahash_request *req)
801 { 793 {
802 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 794 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
803 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 795 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
804 796
805 return omap_sham_shash_digest(tctx->fallback, req->base.flags, 797 return omap_sham_shash_digest(tctx->fallback, req->base.flags,
806 ctx->buffer, ctx->bufcnt, req->result); 798 ctx->buffer, ctx->bufcnt, req->result);
807 } 799 }
808 800
809 static int omap_sham_final(struct ahash_request *req) 801 static int omap_sham_final(struct ahash_request *req)
810 { 802 {
811 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 803 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
812 int err = 0;
813 804
814 ctx->flags |= FLAGS_FINUP; 805 ctx->flags |= FLAGS_FINUP;
815 806
816 if (!(ctx->flags & FLAGS_ERROR)) { 807 if (ctx->flags & FLAGS_ERROR)
817 /* OMAP HW accel works only with buffers >= 9 */ 808 return 0; /* uncompleted hash is not needed */
818 /* HMAC is always >= 9 because of ipad */
819 if ((ctx->digcnt + ctx->bufcnt) < 9)
820 err = omap_sham_final_shash(req);
821 else if (ctx->bufcnt)
822 return omap_sham_enqueue(req, OP_FINAL);
823 }
824 809
825 omap_sham_cleanup(req); 810 /* OMAP HW accel works only with buffers >= 9 */
811 /* HMAC is always >= 9 because ipad == block size */
812 if ((ctx->digcnt + ctx->bufcnt) < 9)
813 return omap_sham_final_shash(req);
814 else if (ctx->bufcnt)
815 return omap_sham_enqueue(req, OP_FINAL);
826 816
827 return err; 817 /* copy ready hash (+ finalize hmac) */
818 return omap_sham_finish(req);
828 } 819 }
829 820
830 static int omap_sham_finup(struct ahash_request *req) 821 static int omap_sham_finup(struct ahash_request *req)
831 { 822 {
832 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 823 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
833 int err1, err2; 824 int err1, err2;
834 825
835 ctx->flags |= FLAGS_FINUP; 826 ctx->flags |= FLAGS_FINUP;
836 827
837 err1 = omap_sham_update(req); 828 err1 = omap_sham_update(req);
838 if (err1 == -EINPROGRESS || err1 == -EBUSY) 829 if (err1 == -EINPROGRESS || err1 == -EBUSY)
839 return err1; 830 return err1;
840 /* 831 /*
841 * final() has to be always called to cleanup resources 832 * final() has to be always called to cleanup resources
842 * even if udpate() failed, except EINPROGRESS 833 * even if udpate() failed, except EINPROGRESS
843 */ 834 */
844 err2 = omap_sham_final(req); 835 err2 = omap_sham_final(req);
845 836
846 return err1 ?: err2; 837 return err1 ?: err2;
847 } 838 }
848 839
849 static int omap_sham_digest(struct ahash_request *req) 840 static int omap_sham_digest(struct ahash_request *req)
850 { 841 {
851 return omap_sham_init(req) ?: omap_sham_finup(req); 842 return omap_sham_init(req) ?: omap_sham_finup(req);
852 } 843 }
853 844
854 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, 845 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
855 unsigned int keylen) 846 unsigned int keylen)
856 { 847 {
857 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 848 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
858 struct omap_sham_hmac_ctx *bctx = tctx->base; 849 struct omap_sham_hmac_ctx *bctx = tctx->base;
859 int bs = crypto_shash_blocksize(bctx->shash); 850 int bs = crypto_shash_blocksize(bctx->shash);
860 int ds = crypto_shash_digestsize(bctx->shash); 851 int ds = crypto_shash_digestsize(bctx->shash);
861 int err, i; 852 int err, i;
862 err = crypto_shash_setkey(tctx->fallback, key, keylen); 853 err = crypto_shash_setkey(tctx->fallback, key, keylen);
863 if (err) 854 if (err)
864 return err; 855 return err;
865 856
866 if (keylen > bs) { 857 if (keylen > bs) {
867 err = omap_sham_shash_digest(bctx->shash, 858 err = omap_sham_shash_digest(bctx->shash,
868 crypto_shash_get_flags(bctx->shash), 859 crypto_shash_get_flags(bctx->shash),
869 key, keylen, bctx->ipad); 860 key, keylen, bctx->ipad);
870 if (err) 861 if (err)
871 return err; 862 return err;
872 keylen = ds; 863 keylen = ds;
873 } else { 864 } else {
874 memcpy(bctx->ipad, key, keylen); 865 memcpy(bctx->ipad, key, keylen);
875 } 866 }
876 867
877 memset(bctx->ipad + keylen, 0, bs - keylen); 868 memset(bctx->ipad + keylen, 0, bs - keylen);
878 memcpy(bctx->opad, bctx->ipad, bs); 869 memcpy(bctx->opad, bctx->ipad, bs);
879 870
880 for (i = 0; i < bs; i++) { 871 for (i = 0; i < bs; i++) {
881 bctx->ipad[i] ^= 0x36; 872 bctx->ipad[i] ^= 0x36;
882 bctx->opad[i] ^= 0x5c; 873 bctx->opad[i] ^= 0x5c;
883 } 874 }
884 875
885 return err; 876 return err;
886 } 877 }
887 878
888 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) 879 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
889 { 880 {
890 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 881 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
891 const char *alg_name = crypto_tfm_alg_name(tfm); 882 const char *alg_name = crypto_tfm_alg_name(tfm);
892 883
893 /* Allocate a fallback and abort if it failed. */ 884 /* Allocate a fallback and abort if it failed. */
894 tctx->fallback = crypto_alloc_shash(alg_name, 0, 885 tctx->fallback = crypto_alloc_shash(alg_name, 0,
895 CRYPTO_ALG_NEED_FALLBACK); 886 CRYPTO_ALG_NEED_FALLBACK);
896 if (IS_ERR(tctx->fallback)) { 887 if (IS_ERR(tctx->fallback)) {
897 pr_err("omap-sham: fallback driver '%s' " 888 pr_err("omap-sham: fallback driver '%s' "
898 "could not be loaded.\n", alg_name); 889 "could not be loaded.\n", alg_name);
899 return PTR_ERR(tctx->fallback); 890 return PTR_ERR(tctx->fallback);
900 } 891 }
901 892
902 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 893 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
903 sizeof(struct omap_sham_reqctx) + BUFLEN); 894 sizeof(struct omap_sham_reqctx) + BUFLEN);
904 895
905 if (alg_base) { 896 if (alg_base) {
906 struct omap_sham_hmac_ctx *bctx = tctx->base; 897 struct omap_sham_hmac_ctx *bctx = tctx->base;
907 tctx->flags |= FLAGS_HMAC; 898 tctx->flags |= FLAGS_HMAC;
908 bctx->shash = crypto_alloc_shash(alg_base, 0, 899 bctx->shash = crypto_alloc_shash(alg_base, 0,
909 CRYPTO_ALG_NEED_FALLBACK); 900 CRYPTO_ALG_NEED_FALLBACK);
910 if (IS_ERR(bctx->shash)) { 901 if (IS_ERR(bctx->shash)) {
911 pr_err("omap-sham: base driver '%s' " 902 pr_err("omap-sham: base driver '%s' "
912 "could not be loaded.\n", alg_base); 903 "could not be loaded.\n", alg_base);
913 crypto_free_shash(tctx->fallback); 904 crypto_free_shash(tctx->fallback);
914 return PTR_ERR(bctx->shash); 905 return PTR_ERR(bctx->shash);
915 } 906 }
916 907
917 } 908 }
918 909
919 return 0; 910 return 0;
920 } 911 }
921 912
922 static int omap_sham_cra_init(struct crypto_tfm *tfm) 913 static int omap_sham_cra_init(struct crypto_tfm *tfm)
923 { 914 {
924 return omap_sham_cra_init_alg(tfm, NULL); 915 return omap_sham_cra_init_alg(tfm, NULL);
925 } 916 }
926 917
927 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) 918 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
928 { 919 {
929 return omap_sham_cra_init_alg(tfm, "sha1"); 920 return omap_sham_cra_init_alg(tfm, "sha1");
930 } 921 }
931 922
932 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) 923 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
933 { 924 {
934 return omap_sham_cra_init_alg(tfm, "md5"); 925 return omap_sham_cra_init_alg(tfm, "md5");
935 } 926 }
936 927
937 static void omap_sham_cra_exit(struct crypto_tfm *tfm) 928 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
938 { 929 {
939 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 930 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
940 931
941 crypto_free_shash(tctx->fallback); 932 crypto_free_shash(tctx->fallback);
942 tctx->fallback = NULL; 933 tctx->fallback = NULL;
943 934
944 if (tctx->flags & FLAGS_HMAC) { 935 if (tctx->flags & FLAGS_HMAC) {
945 struct omap_sham_hmac_ctx *bctx = tctx->base; 936 struct omap_sham_hmac_ctx *bctx = tctx->base;
946 crypto_free_shash(bctx->shash); 937 crypto_free_shash(bctx->shash);
947 } 938 }
948 } 939 }
949 940
950 static struct ahash_alg algs[] = { 941 static struct ahash_alg algs[] = {
951 { 942 {
952 .init = omap_sham_init, 943 .init = omap_sham_init,
953 .update = omap_sham_update, 944 .update = omap_sham_update,
954 .final = omap_sham_final, 945 .final = omap_sham_final,
955 .finup = omap_sham_finup, 946 .finup = omap_sham_finup,
956 .digest = omap_sham_digest, 947 .digest = omap_sham_digest,
957 .halg.digestsize = SHA1_DIGEST_SIZE, 948 .halg.digestsize = SHA1_DIGEST_SIZE,
958 .halg.base = { 949 .halg.base = {
959 .cra_name = "sha1", 950 .cra_name = "sha1",
960 .cra_driver_name = "omap-sha1", 951 .cra_driver_name = "omap-sha1",
961 .cra_priority = 100, 952 .cra_priority = 100,
962 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 953 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
963 CRYPTO_ALG_ASYNC | 954 CRYPTO_ALG_ASYNC |
964 CRYPTO_ALG_NEED_FALLBACK, 955 CRYPTO_ALG_NEED_FALLBACK,
965 .cra_blocksize = SHA1_BLOCK_SIZE, 956 .cra_blocksize = SHA1_BLOCK_SIZE,
966 .cra_ctxsize = sizeof(struct omap_sham_ctx), 957 .cra_ctxsize = sizeof(struct omap_sham_ctx),
967 .cra_alignmask = 0, 958 .cra_alignmask = 0,
968 .cra_module = THIS_MODULE, 959 .cra_module = THIS_MODULE,
969 .cra_init = omap_sham_cra_init, 960 .cra_init = omap_sham_cra_init,
970 .cra_exit = omap_sham_cra_exit, 961 .cra_exit = omap_sham_cra_exit,
971 } 962 }
972 }, 963 },
973 { 964 {
974 .init = omap_sham_init, 965 .init = omap_sham_init,
975 .update = omap_sham_update, 966 .update = omap_sham_update,
976 .final = omap_sham_final, 967 .final = omap_sham_final,
977 .finup = omap_sham_finup, 968 .finup = omap_sham_finup,
978 .digest = omap_sham_digest, 969 .digest = omap_sham_digest,
	.halg.digestsize = MD5_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "md5",
		.cra_driver_name = "omap-md5",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct omap_sham_ctx),
		.cra_alignmask = OMAP_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = omap_sham_cra_init,
		.cra_exit = omap_sham_cra_exit,
	}
},
{
	.init = omap_sham_init,
	.update = omap_sham_update,
	.final = omap_sham_final,
	.finup = omap_sham_finup,
	.digest = omap_sham_digest,
	.setkey = omap_sham_setkey,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "hmac(sha1)",
		.cra_driver_name = "omap-hmac-sha1",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct omap_sham_ctx) +
				sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask = OMAP_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = omap_sham_cra_sha1_init,
		.cra_exit = omap_sham_cra_exit,
	}
},
{
	.init = omap_sham_init,
	.update = omap_sham_update,
	.final = omap_sham_final,
	.finup = omap_sham_finup,
	.digest = omap_sham_digest,
	.setkey = omap_sham_setkey,
	.halg.digestsize = MD5_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "hmac(md5)",
		.cra_driver_name = "omap-hmac-md5",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct omap_sham_ctx) +
				sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask = OMAP_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = omap_sham_cra_md5_init,
		.cra_exit = omap_sham_cra_exit,
	}
}
};

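/*
 * Bottom half shared by the IRQ handler and the DMA callback: restart
 * DMA if more input is pending, and once the digest is ready (or an
 * error occurred) complete the current request and dequeue the next one.
 */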
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int ready = 0, err = 0;

	if (ctx->flags & FLAGS_OUTPUT_READY) {
		ctx->flags &= ~FLAGS_OUTPUT_READY;
		ready = 1;
	}

	if (dd->flags & FLAGS_DMA_ACTIVE) {
		dd->flags &= ~FLAGS_DMA_ACTIVE;
		omap_sham_update_dma_stop(dd);
		if (!dd->err)
			err = omap_sham_update_dma_start(dd);
	}

	err = dd->err ? : err;

	if (err != -EINPROGRESS && (ready || err)) {
		dev_dbg(dd->dev, "update done: err: %d\n", err);
		/* finish current request */
		omap_sham_finish_req(req, err);
		/* start new request */
		omap_sham_handle_queue(dd, NULL);
	}
}

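/* Kick the request queue outside of interrupt context. */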
static void omap_sham_queue_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;

	omap_sham_handle_queue(dd, NULL);
}

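/*
 * "Output ready" interrupt: acknowledge the status bit, flag the result
 * as available and defer completion handling to the done tasklet.
 */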
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (!ctx) {
		dev_err(dd->dev, "unknown interrupt.\n");
		return IRQ_HANDLED;
	}

	if (unlikely(ctx->flags & FLAGS_FINAL))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	ctx->flags |= FLAGS_OUTPUT_READY;
	dd->err = 0;
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

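/*
 * DMA completion callback; any status other than a block interrupt is
 * treated as an I/O error and forces re-initialization of the core.
 */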
static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
		dd->err = -EIO;
		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
	}

	tasklet_schedule(&dd->done_task);
}

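/* Claim the DMA channel described by the platform DMA resource. */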
static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		return err;
	}

	return 0;
}

static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}

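/*
 * Probe: fetch the MEM/DMA/IRQ platform resources, set up the IRQ line,
 * DMA channel, interface clock and register mapping, then register the
 * ahash algorithms. The error labels unwind in reverse acquisition order.
 */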
static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

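/*
 * Remove: unregister the algorithms, kill the tasklets and release the
 * resources acquired in probe.
 */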
static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}

static struct platform_driver omap_sham_driver = {
	.probe = omap_sham_probe,
	.remove = omap_sham_remove,
	.driver = {
		.name = "omap-sham",
		.owner = THIS_MODULE,
	},
};

static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");