Commit 7759995c75ae0cbd4c861582908449f6b6208e7a
Committed by: Herbert Xu
Parent: 8652348754
Exists in: master and in 7 other branches

crypto: mv_cesa - copy remaining bytes to SRAM only when needed

Signed-off-by: Phil Sutter <phil.sutter@viprinet.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Showing 1 changed file with 6 additions and 6 deletions
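The change is confined to mv_start_new_hash_req(). Previously, leftover bytes from the prior update (old_extra_bytes) were copied into the engine's SRAM unconditionally, before the function knew whether a hardware pass would run at all. The patch moves that copy inside the if (hw_bytes) branch, so the SRAM write happens only when the accelerator is actually started. Below is the relocated block as it reads after the patch, excerpted from the diff that follows; the comments are editorial annotations, not part of the commit:

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		/* Stage leftover bytes from the previous chunk in SRAM
		 * only now that we know the hardware will run; before
		 * this patch the memcpy happened unconditionally near
		 * the top of the function. */
		if (unlikely(old_extra_bytes)) {
			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
			       old_extra_bytes);
			p->crypt_len = old_extra_bytes;
		}

		mv_process_hash_current(1);
	}

When hw_bytes is zero the request is completed entirely in software (buffering into ctx->buffer and, for the final chunk, the shash fallback), so with this change nothing is written to SRAM in that case.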
drivers/crypto/mv_cesa.c
/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA "MV-CESA:"
#define MAX_HW_HASH_SIZE 0xFFFF

/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current hw crypt/hash process
 * @hw_nbytes: total bytes to process in hw for this request
 * @copy_back: whether to copy data back (crypt) or not (hash)
 * @sg_dst_left: bytes left dst to process in this scatter list
 * @dst_start: offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
58 | */ | 58 | */ |
59 | struct req_progress { | 59 | struct req_progress { |
60 | struct sg_mapping_iter src_sg_it; | 60 | struct sg_mapping_iter src_sg_it; |
61 | struct sg_mapping_iter dst_sg_it; | 61 | struct sg_mapping_iter dst_sg_it; |
62 | void (*complete) (void); | 62 | void (*complete) (void); |
63 | void (*process) (int is_first); | 63 | void (*process) (int is_first); |
64 | 64 | ||
65 | /* src mostly */ | 65 | /* src mostly */ |
66 | int sg_src_left; | 66 | int sg_src_left; |
67 | int src_start; | 67 | int src_start; |
68 | int crypt_len; | 68 | int crypt_len; |
69 | int hw_nbytes; | 69 | int hw_nbytes; |
70 | /* dst mostly */ | 70 | /* dst mostly */ |
71 | int copy_back; | 71 | int copy_back; |
72 | int sg_dst_left; | 72 | int sg_dst_left; |
73 | int dst_start; | 73 | int dst_start; |
74 | int hw_processed_bytes; | 74 | int hw_processed_bytes; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | struct crypto_priv { | 77 | struct crypto_priv { |
78 | void __iomem *reg; | 78 | void __iomem *reg; |
79 | void __iomem *sram; | 79 | void __iomem *sram; |
80 | int irq; | 80 | int irq; |
81 | struct task_struct *queue_th; | 81 | struct task_struct *queue_th; |
82 | 82 | ||
83 | /* the lock protects queue and eng_st */ | 83 | /* the lock protects queue and eng_st */ |
84 | spinlock_t lock; | 84 | spinlock_t lock; |
85 | struct crypto_queue queue; | 85 | struct crypto_queue queue; |
86 | enum engine_status eng_st; | 86 | enum engine_status eng_st; |
87 | struct crypto_async_request *cur_req; | 87 | struct crypto_async_request *cur_req; |
88 | struct req_progress p; | 88 | struct req_progress p; |
89 | int max_req_size; | 89 | int max_req_size; |
90 | int sram_size; | 90 | int sram_size; |
91 | int has_sha1; | 91 | int has_sha1; |
92 | int has_hmac_sha1; | 92 | int has_hmac_sha1; |
93 | }; | 93 | }; |
94 | 94 | ||
95 | static struct crypto_priv *cpg; | 95 | static struct crypto_priv *cpg; |
96 | 96 | ||
97 | struct mv_ctx { | 97 | struct mv_ctx { |
98 | u8 aes_enc_key[AES_KEY_LEN]; | 98 | u8 aes_enc_key[AES_KEY_LEN]; |
99 | u32 aes_dec_key[8]; | 99 | u32 aes_dec_key[8]; |
100 | int key_len; | 100 | int key_len; |
101 | u32 need_calc_aes_dkey; | 101 | u32 need_calc_aes_dkey; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | enum crypto_op { | 104 | enum crypto_op { |
105 | COP_AES_ECB, | 105 | COP_AES_ECB, |
106 | COP_AES_CBC, | 106 | COP_AES_CBC, |
107 | }; | 107 | }; |
108 | 108 | ||
109 | struct mv_req_ctx { | 109 | struct mv_req_ctx { |
110 | enum crypto_op op; | 110 | enum crypto_op op; |
111 | int decrypt; | 111 | int decrypt; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | enum hash_op { | 114 | enum hash_op { |
115 | COP_SHA1, | 115 | COP_SHA1, |
116 | COP_HMAC_SHA1 | 116 | COP_HMAC_SHA1 |
117 | }; | 117 | }; |
118 | 118 | ||
119 | struct mv_tfm_hash_ctx { | 119 | struct mv_tfm_hash_ctx { |
120 | struct crypto_shash *fallback; | 120 | struct crypto_shash *fallback; |
121 | struct crypto_shash *base_hash; | 121 | struct crypto_shash *base_hash; |
122 | u32 ivs[2 * SHA1_DIGEST_SIZE / 4]; | 122 | u32 ivs[2 * SHA1_DIGEST_SIZE / 4]; |
123 | int count_add; | 123 | int count_add; |
124 | enum hash_op op; | 124 | enum hash_op op; |
125 | }; | 125 | }; |
126 | 126 | ||
127 | struct mv_req_hash_ctx { | 127 | struct mv_req_hash_ctx { |
128 | u64 count; | 128 | u64 count; |
129 | u32 state[SHA1_DIGEST_SIZE / 4]; | 129 | u32 state[SHA1_DIGEST_SIZE / 4]; |
130 | u8 buffer[SHA1_BLOCK_SIZE]; | 130 | u8 buffer[SHA1_BLOCK_SIZE]; |
131 | int first_hash; /* marks that we don't have previous state */ | 131 | int first_hash; /* marks that we don't have previous state */ |
132 | int last_chunk; /* marks that this is the 'final' request */ | 132 | int last_chunk; /* marks that this is the 'final' request */ |
133 | int extra_bytes; /* unprocessed bytes in buffer */ | 133 | int extra_bytes; /* unprocessed bytes in buffer */ |
134 | enum hash_op op; | 134 | enum hash_op op; |
135 | int count_add; | 135 | int count_add; |
136 | struct scatterlist dummysg; | 136 | struct scatterlist dummysg; |
137 | }; | 137 | }; |
138 | 138 | ||
139 | static void compute_aes_dec_key(struct mv_ctx *ctx) | 139 | static void compute_aes_dec_key(struct mv_ctx *ctx) |
140 | { | 140 | { |
141 | struct crypto_aes_ctx gen_aes_key; | 141 | struct crypto_aes_ctx gen_aes_key; |
142 | int key_pos; | 142 | int key_pos; |
143 | 143 | ||
144 | if (!ctx->need_calc_aes_dkey) | 144 | if (!ctx->need_calc_aes_dkey) |
145 | return; | 145 | return; |
146 | 146 | ||
147 | crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); | 147 | crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); |
148 | 148 | ||
149 | key_pos = ctx->key_len + 24; | 149 | key_pos = ctx->key_len + 24; |
150 | memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); | 150 | memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); |
151 | switch (ctx->key_len) { | 151 | switch (ctx->key_len) { |
152 | case AES_KEYSIZE_256: | 152 | case AES_KEYSIZE_256: |
153 | key_pos -= 2; | 153 | key_pos -= 2; |
154 | /* fall */ | 154 | /* fall */ |
155 | case AES_KEYSIZE_192: | 155 | case AES_KEYSIZE_192: |
156 | key_pos -= 2; | 156 | key_pos -= 2; |
157 | memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], | 157 | memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], |
158 | 4 * 4); | 158 | 4 * 4); |
159 | break; | 159 | break; |
160 | } | 160 | } |
161 | ctx->need_calc_aes_dkey = 0; | 161 | ctx->need_calc_aes_dkey = 0; |
162 | } | 162 | } |
163 | 163 | ||
164 | static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, | 164 | static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, |
165 | unsigned int len) | 165 | unsigned int len) |
166 | { | 166 | { |
167 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 167 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); |
168 | struct mv_ctx *ctx = crypto_tfm_ctx(tfm); | 168 | struct mv_ctx *ctx = crypto_tfm_ctx(tfm); |
169 | 169 | ||
170 | switch (len) { | 170 | switch (len) { |
171 | case AES_KEYSIZE_128: | 171 | case AES_KEYSIZE_128: |
172 | case AES_KEYSIZE_192: | 172 | case AES_KEYSIZE_192: |
173 | case AES_KEYSIZE_256: | 173 | case AES_KEYSIZE_256: |
174 | break; | 174 | break; |
175 | default: | 175 | default: |
176 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 176 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
177 | return -EINVAL; | 177 | return -EINVAL; |
178 | } | 178 | } |
179 | ctx->key_len = len; | 179 | ctx->key_len = len; |
180 | ctx->need_calc_aes_dkey = 1; | 180 | ctx->need_calc_aes_dkey = 1; |
181 | 181 | ||
182 | memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); | 182 | memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); |
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) | 186 | static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) |
187 | { | 187 | { |
188 | int ret; | 188 | int ret; |
189 | void *sbuf; | 189 | void *sbuf; |
190 | int copy_len; | 190 | int copy_len; |
191 | 191 | ||
192 | while (len) { | 192 | while (len) { |
193 | if (!p->sg_src_left) { | 193 | if (!p->sg_src_left) { |
194 | ret = sg_miter_next(&p->src_sg_it); | 194 | ret = sg_miter_next(&p->src_sg_it); |
195 | BUG_ON(!ret); | 195 | BUG_ON(!ret); |
196 | p->sg_src_left = p->src_sg_it.length; | 196 | p->sg_src_left = p->src_sg_it.length; |
197 | p->src_start = 0; | 197 | p->src_start = 0; |
198 | } | 198 | } |
199 | 199 | ||
200 | sbuf = p->src_sg_it.addr + p->src_start; | 200 | sbuf = p->src_sg_it.addr + p->src_start; |
201 | 201 | ||
202 | copy_len = min(p->sg_src_left, len); | 202 | copy_len = min(p->sg_src_left, len); |
203 | memcpy(dbuf, sbuf, copy_len); | 203 | memcpy(dbuf, sbuf, copy_len); |
204 | 204 | ||
205 | p->src_start += copy_len; | 205 | p->src_start += copy_len; |
206 | p->sg_src_left -= copy_len; | 206 | p->sg_src_left -= copy_len; |
207 | 207 | ||
208 | len -= copy_len; | 208 | len -= copy_len; |
209 | dbuf += copy_len; | 209 | dbuf += copy_len; |
210 | } | 210 | } |
211 | } | 211 | } |
212 | 212 | ||
213 | static void setup_data_in(void) | 213 | static void setup_data_in(void) |
214 | { | 214 | { |
215 | struct req_progress *p = &cpg->p; | 215 | struct req_progress *p = &cpg->p; |
216 | int data_in_sram = | 216 | int data_in_sram = |
217 | min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); | 217 | min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); |
218 | copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, | 218 | copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, |
219 | data_in_sram - p->crypt_len); | 219 | data_in_sram - p->crypt_len); |
220 | p->crypt_len = data_in_sram; | 220 | p->crypt_len = data_in_sram; |
221 | } | 221 | } |
222 | 222 | ||
223 | static void mv_process_current_q(int first_block) | 223 | static void mv_process_current_q(int first_block) |
224 | { | 224 | { |
225 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); | 225 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); |
226 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 226 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
227 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 227 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
228 | struct sec_accel_config op; | 228 | struct sec_accel_config op; |
229 | 229 | ||
230 | switch (req_ctx->op) { | 230 | switch (req_ctx->op) { |
231 | case COP_AES_ECB: | 231 | case COP_AES_ECB: |
232 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; | 232 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; |
233 | break; | 233 | break; |
234 | case COP_AES_CBC: | 234 | case COP_AES_CBC: |
235 | default: | 235 | default: |
236 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; | 236 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; |
237 | op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | | 237 | op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | |
238 | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); | 238 | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); |
239 | if (first_block) | 239 | if (first_block) |
240 | memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); | 240 | memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); |
241 | break; | 241 | break; |
242 | } | 242 | } |
243 | if (req_ctx->decrypt) { | 243 | if (req_ctx->decrypt) { |
244 | op.config |= CFG_DIR_DEC; | 244 | op.config |= CFG_DIR_DEC; |
245 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, | 245 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, |
246 | AES_KEY_LEN); | 246 | AES_KEY_LEN); |
247 | } else { | 247 | } else { |
248 | op.config |= CFG_DIR_ENC; | 248 | op.config |= CFG_DIR_ENC; |
249 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, | 249 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, |
250 | AES_KEY_LEN); | 250 | AES_KEY_LEN); |
251 | } | 251 | } |
252 | 252 | ||
253 | switch (ctx->key_len) { | 253 | switch (ctx->key_len) { |
254 | case AES_KEYSIZE_128: | 254 | case AES_KEYSIZE_128: |
255 | op.config |= CFG_AES_LEN_128; | 255 | op.config |= CFG_AES_LEN_128; |
256 | break; | 256 | break; |
257 | case AES_KEYSIZE_192: | 257 | case AES_KEYSIZE_192: |
258 | op.config |= CFG_AES_LEN_192; | 258 | op.config |= CFG_AES_LEN_192; |
259 | break; | 259 | break; |
260 | case AES_KEYSIZE_256: | 260 | case AES_KEYSIZE_256: |
261 | op.config |= CFG_AES_LEN_256; | 261 | op.config |= CFG_AES_LEN_256; |
262 | break; | 262 | break; |
263 | } | 263 | } |
264 | op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) | | 264 | op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) | |
265 | ENC_P_DST(SRAM_DATA_OUT_START); | 265 | ENC_P_DST(SRAM_DATA_OUT_START); |
266 | op.enc_key_p = SRAM_DATA_KEY_P; | 266 | op.enc_key_p = SRAM_DATA_KEY_P; |
267 | 267 | ||
268 | setup_data_in(); | 268 | setup_data_in(); |
269 | op.enc_len = cpg->p.crypt_len; | 269 | op.enc_len = cpg->p.crypt_len; |
270 | memcpy(cpg->sram + SRAM_CONFIG, &op, | 270 | memcpy(cpg->sram + SRAM_CONFIG, &op, |
271 | sizeof(struct sec_accel_config)); | 271 | sizeof(struct sec_accel_config)); |
272 | 272 | ||
273 | /* GO */ | 273 | /* GO */ |
274 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 274 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * XXX: add timer if the interrupt does not occur for some mystery | 277 | * XXX: add timer if the interrupt does not occur for some mystery |
278 | * reason | 278 | * reason |
279 | */ | 279 | */ |
280 | } | 280 | } |
281 | 281 | ||
282 | static void mv_crypto_algo_completion(void) | 282 | static void mv_crypto_algo_completion(void) |
283 | { | 283 | { |
284 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); | 284 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); |
285 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 285 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
286 | 286 | ||
287 | sg_miter_stop(&cpg->p.src_sg_it); | 287 | sg_miter_stop(&cpg->p.src_sg_it); |
288 | sg_miter_stop(&cpg->p.dst_sg_it); | 288 | sg_miter_stop(&cpg->p.dst_sg_it); |
289 | 289 | ||
290 | if (req_ctx->op != COP_AES_CBC) | 290 | if (req_ctx->op != COP_AES_CBC) |
291 | return ; | 291 | return ; |
292 | 292 | ||
293 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); | 293 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); |
294 | } | 294 | } |
295 | 295 | ||
296 | static void mv_process_hash_current(int first_block) | 296 | static void mv_process_hash_current(int first_block) |
297 | { | 297 | { |
298 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | 298 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); |
299 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | 299 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); |
300 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | 300 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); |
301 | struct req_progress *p = &cpg->p; | 301 | struct req_progress *p = &cpg->p; |
302 | struct sec_accel_config op = { 0 }; | 302 | struct sec_accel_config op = { 0 }; |
303 | int is_last; | 303 | int is_last; |
304 | 304 | ||
305 | switch (req_ctx->op) { | 305 | switch (req_ctx->op) { |
306 | case COP_SHA1: | 306 | case COP_SHA1: |
307 | default: | 307 | default: |
308 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; | 308 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; |
309 | break; | 309 | break; |
310 | case COP_HMAC_SHA1: | 310 | case COP_HMAC_SHA1: |
311 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; | 311 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; |
312 | memcpy(cpg->sram + SRAM_HMAC_IV_IN, | 312 | memcpy(cpg->sram + SRAM_HMAC_IV_IN, |
313 | tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); | 313 | tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); |
314 | break; | 314 | break; |
315 | } | 315 | } |
316 | 316 | ||
317 | op.mac_src_p = | 317 | op.mac_src_p = |
318 | MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) | 318 | MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) |
319 | req_ctx-> | 319 | req_ctx-> |
320 | count); | 320 | count); |
321 | 321 | ||
322 | setup_data_in(); | 322 | setup_data_in(); |
323 | 323 | ||
324 | op.mac_digest = | 324 | op.mac_digest = |
325 | MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); | 325 | MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); |
326 | op.mac_iv = | 326 | op.mac_iv = |
327 | MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | | 327 | MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | |
328 | MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); | 328 | MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); |
329 | 329 | ||
330 | is_last = req_ctx->last_chunk | 330 | is_last = req_ctx->last_chunk |
331 | && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) | 331 | && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) |
332 | && (req_ctx->count <= MAX_HW_HASH_SIZE); | 332 | && (req_ctx->count <= MAX_HW_HASH_SIZE); |
333 | if (req_ctx->first_hash) { | 333 | if (req_ctx->first_hash) { |
334 | if (is_last) | 334 | if (is_last) |
335 | op.config |= CFG_NOT_FRAG; | 335 | op.config |= CFG_NOT_FRAG; |
336 | else | 336 | else |
337 | op.config |= CFG_FIRST_FRAG; | 337 | op.config |= CFG_FIRST_FRAG; |
338 | 338 | ||
339 | req_ctx->first_hash = 0; | 339 | req_ctx->first_hash = 0; |
340 | } else { | 340 | } else { |
341 | if (is_last) | 341 | if (is_last) |
342 | op.config |= CFG_LAST_FRAG; | 342 | op.config |= CFG_LAST_FRAG; |
343 | else | 343 | else |
344 | op.config |= CFG_MID_FRAG; | 344 | op.config |= CFG_MID_FRAG; |
345 | 345 | ||
346 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | 346 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); |
347 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | 347 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); |
348 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | 348 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); |
349 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | 349 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); |
350 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | 350 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); |
351 | } | 351 | } |
352 | 352 | ||
353 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | 353 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); |
354 | 354 | ||
355 | /* GO */ | 355 | /* GO */ |
356 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 356 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
357 | 357 | ||
358 | /* | 358 | /* |
359 | * XXX: add timer if the interrupt does not occur for some mystery | 359 | * XXX: add timer if the interrupt does not occur for some mystery |
360 | * reason | 360 | * reason |
361 | */ | 361 | */ |
362 | } | 362 | } |
363 | 363 | ||
364 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, | 364 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, |
365 | struct shash_desc *desc) | 365 | struct shash_desc *desc) |
366 | { | 366 | { |
367 | int i; | 367 | int i; |
368 | struct sha1_state shash_state; | 368 | struct sha1_state shash_state; |
369 | 369 | ||
370 | shash_state.count = ctx->count + ctx->count_add; | 370 | shash_state.count = ctx->count + ctx->count_add; |
371 | for (i = 0; i < 5; i++) | 371 | for (i = 0; i < 5; i++) |
372 | shash_state.state[i] = ctx->state[i]; | 372 | shash_state.state[i] = ctx->state[i]; |
373 | memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); | 373 | memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); |
374 | return crypto_shash_import(desc, &shash_state); | 374 | return crypto_shash_import(desc, &shash_state); |
375 | } | 375 | } |
376 | 376 | ||
377 | static int mv_hash_final_fallback(struct ahash_request *req) | 377 | static int mv_hash_final_fallback(struct ahash_request *req) |
378 | { | 378 | { |
379 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | 379 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); |
380 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | 380 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); |
381 | struct { | 381 | struct { |
382 | struct shash_desc shash; | 382 | struct shash_desc shash; |
383 | char ctx[crypto_shash_descsize(tfm_ctx->fallback)]; | 383 | char ctx[crypto_shash_descsize(tfm_ctx->fallback)]; |
384 | } desc; | 384 | } desc; |
385 | int rc; | 385 | int rc; |
386 | 386 | ||
387 | desc.shash.tfm = tfm_ctx->fallback; | 387 | desc.shash.tfm = tfm_ctx->fallback; |
388 | desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 388 | desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
389 | if (unlikely(req_ctx->first_hash)) { | 389 | if (unlikely(req_ctx->first_hash)) { |
390 | crypto_shash_init(&desc.shash); | 390 | crypto_shash_init(&desc.shash); |
391 | crypto_shash_update(&desc.shash, req_ctx->buffer, | 391 | crypto_shash_update(&desc.shash, req_ctx->buffer, |
392 | req_ctx->extra_bytes); | 392 | req_ctx->extra_bytes); |
393 | } else { | 393 | } else { |
394 | /* only SHA1 for now.... | 394 | /* only SHA1 for now.... |
395 | */ | 395 | */ |
396 | rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash); | 396 | rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash); |
397 | if (rc) | 397 | if (rc) |
398 | goto out; | 398 | goto out; |
399 | } | 399 | } |
400 | rc = crypto_shash_final(&desc.shash, req->result); | 400 | rc = crypto_shash_final(&desc.shash, req->result); |
401 | out: | 401 | out: |
402 | return rc; | 402 | return rc; |
403 | } | 403 | } |
404 | 404 | ||
405 | static void mv_hash_algo_completion(void) | 405 | static void mv_hash_algo_completion(void) |
406 | { | 406 | { |
407 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | 407 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); |
408 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | 408 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); |
409 | 409 | ||
410 | if (ctx->extra_bytes) | 410 | if (ctx->extra_bytes) |
411 | copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); | 411 | copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); |
412 | sg_miter_stop(&cpg->p.src_sg_it); | 412 | sg_miter_stop(&cpg->p.src_sg_it); |
413 | 413 | ||
414 | if (likely(ctx->last_chunk)) { | 414 | if (likely(ctx->last_chunk)) { |
415 | if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { | 415 | if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { |
416 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, | 416 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, |
417 | crypto_ahash_digestsize(crypto_ahash_reqtfm | 417 | crypto_ahash_digestsize(crypto_ahash_reqtfm |
418 | (req))); | 418 | (req))); |
419 | } else | 419 | } else |
420 | mv_hash_final_fallback(req); | 420 | mv_hash_final_fallback(req); |
421 | } else { | 421 | } else { |
422 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | 422 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); |
423 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | 423 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); |
424 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | 424 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); |
425 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | 425 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); |
426 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | 426 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); |
427 | } | 427 | } |
428 | } | 428 | } |
429 | 429 | ||
430 | static void dequeue_complete_req(void) | 430 | static void dequeue_complete_req(void) |
431 | { | 431 | { |
432 | struct crypto_async_request *req = cpg->cur_req; | 432 | struct crypto_async_request *req = cpg->cur_req; |
433 | void *buf; | 433 | void *buf; |
434 | int ret; | 434 | int ret; |
435 | cpg->p.hw_processed_bytes += cpg->p.crypt_len; | 435 | cpg->p.hw_processed_bytes += cpg->p.crypt_len; |
436 | if (cpg->p.copy_back) { | 436 | if (cpg->p.copy_back) { |
437 | int need_copy_len = cpg->p.crypt_len; | 437 | int need_copy_len = cpg->p.crypt_len; |
438 | int sram_offset = 0; | 438 | int sram_offset = 0; |
439 | do { | 439 | do { |
440 | int dst_copy; | 440 | int dst_copy; |
441 | 441 | ||
442 | if (!cpg->p.sg_dst_left) { | 442 | if (!cpg->p.sg_dst_left) { |
443 | ret = sg_miter_next(&cpg->p.dst_sg_it); | 443 | ret = sg_miter_next(&cpg->p.dst_sg_it); |
444 | BUG_ON(!ret); | 444 | BUG_ON(!ret); |
445 | cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; | 445 | cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; |
446 | cpg->p.dst_start = 0; | 446 | cpg->p.dst_start = 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | buf = cpg->p.dst_sg_it.addr; | 449 | buf = cpg->p.dst_sg_it.addr; |
450 | buf += cpg->p.dst_start; | 450 | buf += cpg->p.dst_start; |
451 | 451 | ||
452 | dst_copy = min(need_copy_len, cpg->p.sg_dst_left); | 452 | dst_copy = min(need_copy_len, cpg->p.sg_dst_left); |
453 | 453 | ||
454 | memcpy(buf, | 454 | memcpy(buf, |
455 | cpg->sram + SRAM_DATA_OUT_START + sram_offset, | 455 | cpg->sram + SRAM_DATA_OUT_START + sram_offset, |
456 | dst_copy); | 456 | dst_copy); |
457 | sram_offset += dst_copy; | 457 | sram_offset += dst_copy; |
458 | cpg->p.sg_dst_left -= dst_copy; | 458 | cpg->p.sg_dst_left -= dst_copy; |
459 | need_copy_len -= dst_copy; | 459 | need_copy_len -= dst_copy; |
460 | cpg->p.dst_start += dst_copy; | 460 | cpg->p.dst_start += dst_copy; |
461 | } while (need_copy_len > 0); | 461 | } while (need_copy_len > 0); |
462 | } | 462 | } |
463 | 463 | ||
464 | cpg->p.crypt_len = 0; | 464 | cpg->p.crypt_len = 0; |
465 | 465 | ||
466 | BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); | 466 | BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); |
467 | if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { | 467 | if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { |
468 | /* process next scatter list entry */ | 468 | /* process next scatter list entry */ |
469 | cpg->eng_st = ENGINE_BUSY; | 469 | cpg->eng_st = ENGINE_BUSY; |
470 | cpg->p.process(0); | 470 | cpg->p.process(0); |
471 | } else { | 471 | } else { |
472 | cpg->p.complete(); | 472 | cpg->p.complete(); |
473 | cpg->eng_st = ENGINE_IDLE; | 473 | cpg->eng_st = ENGINE_IDLE; |
474 | local_bh_disable(); | 474 | local_bh_disable(); |
475 | req->complete(req, 0); | 475 | req->complete(req, 0); |
476 | local_bh_enable(); | 476 | local_bh_enable(); |
477 | } | 477 | } |
478 | } | 478 | } |
479 | 479 | ||
480 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | 480 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) |
481 | { | 481 | { |
482 | int i = 0; | 482 | int i = 0; |
483 | size_t cur_len; | 483 | size_t cur_len; |
484 | 484 | ||
485 | while (1) { | 485 | while (1) { |
486 | cur_len = sl[i].length; | 486 | cur_len = sl[i].length; |
487 | ++i; | 487 | ++i; |
488 | if (total_bytes > cur_len) | 488 | if (total_bytes > cur_len) |
489 | total_bytes -= cur_len; | 489 | total_bytes -= cur_len; |
490 | else | 490 | else |
491 | break; | 491 | break; |
492 | } | 492 | } |
493 | 493 | ||
494 | return i; | 494 | return i; |
495 | } | 495 | } |
496 | 496 | ||
497 | static void mv_start_new_crypt_req(struct ablkcipher_request *req) | 497 | static void mv_start_new_crypt_req(struct ablkcipher_request *req) |
498 | { | 498 | { |
499 | struct req_progress *p = &cpg->p; | 499 | struct req_progress *p = &cpg->p; |
500 | int num_sgs; | 500 | int num_sgs; |
501 | 501 | ||
502 | cpg->cur_req = &req->base; | 502 | cpg->cur_req = &req->base; |
503 | memset(p, 0, sizeof(struct req_progress)); | 503 | memset(p, 0, sizeof(struct req_progress)); |
504 | p->hw_nbytes = req->nbytes; | 504 | p->hw_nbytes = req->nbytes; |
505 | p->complete = mv_crypto_algo_completion; | 505 | p->complete = mv_crypto_algo_completion; |
506 | p->process = mv_process_current_q; | 506 | p->process = mv_process_current_q; |
507 | p->copy_back = 1; | 507 | p->copy_back = 1; |
508 | 508 | ||
509 | num_sgs = count_sgs(req->src, req->nbytes); | 509 | num_sgs = count_sgs(req->src, req->nbytes); |
510 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | 510 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); |
511 | 511 | ||
512 | num_sgs = count_sgs(req->dst, req->nbytes); | 512 | num_sgs = count_sgs(req->dst, req->nbytes); |
513 | sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); | 513 | sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); |
514 | 514 | ||
515 | mv_process_current_q(1); | 515 | mv_process_current_q(1); |
516 | } | 516 | } |
517 | 517 | ||
518 | static void mv_start_new_hash_req(struct ahash_request *req) | 518 | static void mv_start_new_hash_req(struct ahash_request *req) |
519 | { | 519 | { |
520 | struct req_progress *p = &cpg->p; | 520 | struct req_progress *p = &cpg->p; |
521 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | 521 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); |
522 | int num_sgs, hw_bytes, old_extra_bytes, rc; | 522 | int num_sgs, hw_bytes, old_extra_bytes, rc; |
523 | cpg->cur_req = &req->base; | 523 | cpg->cur_req = &req->base; |
524 | memset(p, 0, sizeof(struct req_progress)); | 524 | memset(p, 0, sizeof(struct req_progress)); |
525 | hw_bytes = req->nbytes + ctx->extra_bytes; | 525 | hw_bytes = req->nbytes + ctx->extra_bytes; |
526 | old_extra_bytes = ctx->extra_bytes; | 526 | old_extra_bytes = ctx->extra_bytes; |
527 | 527 | ||
528 | if (unlikely(ctx->extra_bytes)) { | ||
529 | memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, | ||
530 | ctx->extra_bytes); | ||
531 | p->crypt_len = ctx->extra_bytes; | ||
532 | } | ||
533 | |||
534 | ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; | 528 | ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; |
535 | if (ctx->extra_bytes != 0 | 529 | if (ctx->extra_bytes != 0 |
536 | && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) | 530 | && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) |
537 | hw_bytes -= ctx->extra_bytes; | 531 | hw_bytes -= ctx->extra_bytes; |
538 | else | 532 | else |
539 | ctx->extra_bytes = 0; | 533 | ctx->extra_bytes = 0; |
540 | 534 | ||
541 | num_sgs = count_sgs(req->src, req->nbytes); | 535 | num_sgs = count_sgs(req->src, req->nbytes); |
542 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | 536 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); |
543 | 537 | ||
544 | if (hw_bytes) { | 538 | if (hw_bytes) { |
545 | p->hw_nbytes = hw_bytes; | 539 | p->hw_nbytes = hw_bytes; |
546 | p->complete = mv_hash_algo_completion; | 540 | p->complete = mv_hash_algo_completion; |
547 | p->process = mv_process_hash_current; | 541 | p->process = mv_process_hash_current; |
542 | |||
543 | if (unlikely(old_extra_bytes)) { | ||
544 | memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, | ||
545 | old_extra_bytes); | ||
546 | p->crypt_len = old_extra_bytes; | ||
547 | } | ||
548 | 548 | ||
549 | mv_process_hash_current(1); | 549 | mv_process_hash_current(1); |
550 | } else { | 550 | } else { |
551 | copy_src_to_buf(p, ctx->buffer + old_extra_bytes, | 551 | copy_src_to_buf(p, ctx->buffer + old_extra_bytes, |
552 | ctx->extra_bytes - old_extra_bytes); | 552 | ctx->extra_bytes - old_extra_bytes); |
553 | sg_miter_stop(&p->src_sg_it); | 553 | sg_miter_stop(&p->src_sg_it); |
554 | if (ctx->last_chunk) | 554 | if (ctx->last_chunk) |
555 | rc = mv_hash_final_fallback(req); | 555 | rc = mv_hash_final_fallback(req); |
556 | else | 556 | else |
557 | rc = 0; | 557 | rc = 0; |
558 | cpg->eng_st = ENGINE_IDLE; | 558 | cpg->eng_st = ENGINE_IDLE; |
559 | local_bh_disable(); | 559 | local_bh_disable(); |
560 | req->base.complete(&req->base, rc); | 560 | req->base.complete(&req->base, rc); |
561 | local_bh_enable(); | 561 | local_bh_enable(); |
562 | } | 562 | } |
563 | } | 563 | } |
564 | 564 | ||
565 | static int queue_manag(void *data) | 565 | static int queue_manag(void *data) |
566 | { | 566 | { |
567 | cpg->eng_st = ENGINE_IDLE; | 567 | cpg->eng_st = ENGINE_IDLE; |
568 | do { | 568 | do { |
569 | struct crypto_async_request *async_req = NULL; | 569 | struct crypto_async_request *async_req = NULL; |
570 | struct crypto_async_request *backlog; | 570 | struct crypto_async_request *backlog; |
571 | 571 | ||
572 | __set_current_state(TASK_INTERRUPTIBLE); | 572 | __set_current_state(TASK_INTERRUPTIBLE); |
573 | 573 | ||
574 | if (cpg->eng_st == ENGINE_W_DEQUEUE) | 574 | if (cpg->eng_st == ENGINE_W_DEQUEUE) |
575 | dequeue_complete_req(); | 575 | dequeue_complete_req(); |
576 | 576 | ||
577 | spin_lock_irq(&cpg->lock); | 577 | spin_lock_irq(&cpg->lock); |
578 | if (cpg->eng_st == ENGINE_IDLE) { | 578 | if (cpg->eng_st == ENGINE_IDLE) { |
579 | backlog = crypto_get_backlog(&cpg->queue); | 579 | backlog = crypto_get_backlog(&cpg->queue); |
580 | async_req = crypto_dequeue_request(&cpg->queue); | 580 | async_req = crypto_dequeue_request(&cpg->queue); |
581 | if (async_req) { | 581 | if (async_req) { |
582 | BUG_ON(cpg->eng_st != ENGINE_IDLE); | 582 | BUG_ON(cpg->eng_st != ENGINE_IDLE); |
583 | cpg->eng_st = ENGINE_BUSY; | 583 | cpg->eng_st = ENGINE_BUSY; |
584 | } | 584 | } |
585 | } | 585 | } |
586 | spin_unlock_irq(&cpg->lock); | 586 | spin_unlock_irq(&cpg->lock); |
587 | 587 | ||
588 | if (backlog) { | 588 | if (backlog) { |
589 | backlog->complete(backlog, -EINPROGRESS); | 589 | backlog->complete(backlog, -EINPROGRESS); |
590 | backlog = NULL; | 590 | backlog = NULL; |
591 | } | 591 | } |
592 | 592 | ||
593 | if (async_req) { | 593 | if (async_req) { |
594 | if (async_req->tfm->__crt_alg->cra_type != | 594 | if (async_req->tfm->__crt_alg->cra_type != |
595 | &crypto_ahash_type) { | 595 | &crypto_ahash_type) { |
596 | struct ablkcipher_request *req = | 596 | struct ablkcipher_request *req = |
597 | ablkcipher_request_cast(async_req); | 597 | ablkcipher_request_cast(async_req); |
598 | mv_start_new_crypt_req(req); | 598 | mv_start_new_crypt_req(req); |
599 | } else { | 599 | } else { |
600 | struct ahash_request *req = | 600 | struct ahash_request *req = |
601 | ahash_request_cast(async_req); | 601 | ahash_request_cast(async_req); |
602 | mv_start_new_hash_req(req); | 602 | mv_start_new_hash_req(req); |
603 | } | 603 | } |
604 | async_req = NULL; | 604 | async_req = NULL; |
605 | } | 605 | } |
606 | 606 | ||
607 | schedule(); | 607 | schedule(); |
608 | 608 | ||
609 | } while (!kthread_should_stop()); | 609 | } while (!kthread_should_stop()); |
610 | return 0; | 610 | return 0; |
611 | } | 611 | } |
612 | 612 | ||
613 | static int mv_handle_req(struct crypto_async_request *req) | 613 | static int mv_handle_req(struct crypto_async_request *req) |
614 | { | 614 | { |
615 | unsigned long flags; | 615 | unsigned long flags; |
616 | int ret; | 616 | int ret; |
617 | 617 | ||
618 | spin_lock_irqsave(&cpg->lock, flags); | 618 | spin_lock_irqsave(&cpg->lock, flags); |
619 | ret = crypto_enqueue_request(&cpg->queue, req); | 619 | ret = crypto_enqueue_request(&cpg->queue, req); |
620 | spin_unlock_irqrestore(&cpg->lock, flags); | 620 | spin_unlock_irqrestore(&cpg->lock, flags); |
621 | wake_up_process(cpg->queue_th); | 621 | wake_up_process(cpg->queue_th); |
622 | return ret; | 622 | return ret; |
623 | } | 623 | } |
624 | 624 | ||
625 | static int mv_enc_aes_ecb(struct ablkcipher_request *req) | 625 | static int mv_enc_aes_ecb(struct ablkcipher_request *req) |
626 | { | 626 | { |
627 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 627 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
628 | 628 | ||
629 | req_ctx->op = COP_AES_ECB; | 629 | req_ctx->op = COP_AES_ECB; |
630 | req_ctx->decrypt = 0; | 630 | req_ctx->decrypt = 0; |
631 | 631 | ||
632 | return mv_handle_req(&req->base); | 632 | return mv_handle_req(&req->base); |
633 | } | 633 | } |
634 | 634 | ||
635 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) | 635 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) |
636 | { | 636 | { |
637 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 637 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
638 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 638 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
639 | 639 | ||
640 | req_ctx->op = COP_AES_ECB; | 640 | req_ctx->op = COP_AES_ECB; |
641 | req_ctx->decrypt = 1; | 641 | req_ctx->decrypt = 1; |
642 | 642 | ||
643 | compute_aes_dec_key(ctx); | 643 | compute_aes_dec_key(ctx); |
644 | return mv_handle_req(&req->base); | 644 | return mv_handle_req(&req->base); |
645 | } | 645 | } |
646 | 646 | ||
647 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) | 647 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) |
648 | { | 648 | { |
649 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 649 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
650 | 650 | ||
651 | req_ctx->op = COP_AES_CBC; | 651 | req_ctx->op = COP_AES_CBC; |
652 | req_ctx->decrypt = 0; | 652 | req_ctx->decrypt = 0; |
653 | 653 | ||
654 | return mv_handle_req(&req->base); | 654 | return mv_handle_req(&req->base); |
655 | } | 655 | } |
656 | 656 | ||
657 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) | 657 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) |
658 | { | 658 | { |
659 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 659 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
660 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 660 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
661 | 661 | ||
662 | req_ctx->op = COP_AES_CBC; | 662 | req_ctx->op = COP_AES_CBC; |
663 | req_ctx->decrypt = 1; | 663 | req_ctx->decrypt = 1; |
664 | 664 | ||
665 | compute_aes_dec_key(ctx); | 665 | compute_aes_dec_key(ctx); |
666 | return mv_handle_req(&req->base); | 666 | return mv_handle_req(&req->base); |
667 | } | 667 | } |
668 | 668 | ||
669 | static int mv_cra_init(struct crypto_tfm *tfm) | 669 | static int mv_cra_init(struct crypto_tfm *tfm) |
670 | { | 670 | { |
671 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); | 671 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); |
672 | return 0; | 672 | return 0; |
673 | } | 673 | } |
674 | 674 | ||
675 | static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, | 675 | static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, |
676 | int is_last, unsigned int req_len, | 676 | int is_last, unsigned int req_len, |
677 | int count_add) | 677 | int count_add) |
678 | { | 678 | { |
679 | memset(ctx, 0, sizeof(*ctx)); | 679 | memset(ctx, 0, sizeof(*ctx)); |
680 | ctx->op = op; | 680 | ctx->op = op; |
681 | ctx->count = req_len; | 681 | ctx->count = req_len; |
682 | ctx->first_hash = 1; | 682 | ctx->first_hash = 1; |
683 | ctx->last_chunk = is_last; | 683 | ctx->last_chunk = is_last; |
684 | ctx->count_add = count_add; | 684 | ctx->count_add = count_add; |
685 | } | 685 | } |
686 | 686 | ||
687 | static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, | 687 | static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, |
688 | unsigned req_len) | 688 | unsigned req_len) |
689 | { | 689 | { |
690 | ctx->last_chunk = is_last; | 690 | ctx->last_chunk = is_last; |
691 | ctx->count += req_len; | 691 | ctx->count += req_len; |
692 | } | 692 | } |
693 | 693 | ||
694 | static int mv_hash_init(struct ahash_request *req) | 694 | static int mv_hash_init(struct ahash_request *req) |
695 | { | 695 | { |
696 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | 696 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); |
697 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, | 697 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, |
698 | tfm_ctx->count_add); | 698 | tfm_ctx->count_add); |
699 | return 0; | 699 | return 0; |
700 | } | 700 | } |
701 | 701 | ||
702 | static int mv_hash_update(struct ahash_request *req) | 702 | static int mv_hash_update(struct ahash_request *req) |
703 | { | 703 | { |
704 | if (!req->nbytes) | 704 | if (!req->nbytes) |
705 | return 0; | 705 | return 0; |
706 | 706 | ||
707 | mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); | 707 | mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); |
708 | return mv_handle_req(&req->base); | 708 | return mv_handle_req(&req->base); |
709 | } | 709 | } |
710 | 710 | ||
711 | static int mv_hash_final(struct ahash_request *req) | 711 | static int mv_hash_final(struct ahash_request *req) |
712 | { | 712 | { |
713 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | 713 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); |
714 | /* dummy buffer of 4 bytes */ | 714 | /* dummy buffer of 4 bytes */ |
715 | sg_init_one(&ctx->dummysg, ctx->buffer, 4); | 715 | sg_init_one(&ctx->dummysg, ctx->buffer, 4); |
716 | /* I think I'm allowed to do that... */ | 716 | /* I think I'm allowed to do that... */ |
717 | ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0); | 717 | ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0); |
718 | mv_update_hash_req_ctx(ctx, 1, 0); | 718 | mv_update_hash_req_ctx(ctx, 1, 0); |
719 | return mv_handle_req(&req->base); | 719 | return mv_handle_req(&req->base); |
720 | } | 720 | } |
721 | 721 | ||
722 | static int mv_hash_finup(struct ahash_request *req) | 722 | static int mv_hash_finup(struct ahash_request *req) |
723 | { | 723 | { |
724 | mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); | 724 | mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); |
725 | return mv_handle_req(&req->base); | 725 | return mv_handle_req(&req->base); |
726 | } | 726 | } |
727 | 727 | ||
728 | static int mv_hash_digest(struct ahash_request *req) | 728 | static int mv_hash_digest(struct ahash_request *req) |
729 | { | 729 | { |
730 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | 730 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); |
731 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, | 731 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, |
732 | req->nbytes, tfm_ctx->count_add); | 732 | req->nbytes, tfm_ctx->count_add); |
733 | return mv_handle_req(&req->base); | 733 | return mv_handle_req(&req->base); |
734 | } | 734 | } |
735 | 735 | ||
736 | static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, | 736 | static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, |
737 | const void *ostate) | 737 | const void *ostate) |
738 | { | 738 | { |
739 | const struct sha1_state *isha1_state = istate, *osha1_state = ostate; | 739 | const struct sha1_state *isha1_state = istate, *osha1_state = ostate; |
740 | int i; | 740 | int i; |
741 | for (i = 0; i < 5; i++) { | 741 | for (i = 0; i < 5; i++) { |
742 | ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]); | 742 | ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]); |
743 | ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); | 743 | ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); |
744 | } | 744 | } |
745 | } | 745 | } |
746 | 746 | ||
747 | static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, | 747 | static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, |
748 | unsigned int keylen) | 748 | unsigned int keylen) |
749 | { | 749 | { |
750 | int rc; | 750 | int rc; |
751 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base); | 751 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base); |
752 | int bs, ds, ss; | 752 | int bs, ds, ss; |
753 | 753 | ||
754 | if (!ctx->base_hash) | 754 | if (!ctx->base_hash) |
755 | return 0; | 755 | return 0; |
756 | 756 | ||
757 | rc = crypto_shash_setkey(ctx->fallback, key, keylen); | 757 | rc = crypto_shash_setkey(ctx->fallback, key, keylen); |
758 | if (rc) | 758 | if (rc) |
759 | return rc; | 759 | return rc; |
760 | 760 | ||
761 | /* Can't see a way to extract the ipad/opad from the fallback tfm | 761 | /* Can't see a way to extract the ipad/opad from the fallback tfm |
762 | so I'm basically copying code from the hmac module */ | 762 | so I'm basically copying code from the hmac module */ |
763 | bs = crypto_shash_blocksize(ctx->base_hash); | 763 | bs = crypto_shash_blocksize(ctx->base_hash); |
764 | ds = crypto_shash_digestsize(ctx->base_hash); | 764 | ds = crypto_shash_digestsize(ctx->base_hash); |
765 | ss = crypto_shash_statesize(ctx->base_hash); | 765 | ss = crypto_shash_statesize(ctx->base_hash); |
766 | 766 | ||
767 | { | 767 | { |
768 | struct { | 768 | struct { |
769 | struct shash_desc shash; | 769 | struct shash_desc shash; |
770 | char ctx[crypto_shash_descsize(ctx->base_hash)]; | 770 | char ctx[crypto_shash_descsize(ctx->base_hash)]; |
771 | } desc; | 771 | } desc; |
772 | unsigned int i; | 772 | unsigned int i; |
773 | char ipad[ss]; | 773 | char ipad[ss]; |
774 | char opad[ss]; | 774 | char opad[ss]; |
775 | 775 | ||
776 | desc.shash.tfm = ctx->base_hash; | 776 | desc.shash.tfm = ctx->base_hash; |
777 | desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) & | 777 | desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) & |
778 | CRYPTO_TFM_REQ_MAY_SLEEP; | 778 | CRYPTO_TFM_REQ_MAY_SLEEP; |
779 | 779 | ||
780 | if (keylen > bs) { | 780 | if (keylen > bs) { |
781 | int err; | 781 | int err; |
782 | 782 | ||
783 | err = | 783 | err = |
784 | crypto_shash_digest(&desc.shash, key, keylen, ipad); | 784 | crypto_shash_digest(&desc.shash, key, keylen, ipad); |
785 | if (err) | 785 | if (err) |
786 | return err; | 786 | return err; |
787 | 787 | ||
788 | keylen = ds; | 788 | keylen = ds; |
789 | } else | 789 | } else |
790 | memcpy(ipad, key, keylen); | 790 | memcpy(ipad, key, keylen); |
791 | 791 | ||
792 | memset(ipad + keylen, 0, bs - keylen); | 792 | memset(ipad + keylen, 0, bs - keylen); |
793 | memcpy(opad, ipad, bs); | 793 | memcpy(opad, ipad, bs); |
794 | 794 | ||
795 | for (i = 0; i < bs; i++) { | 795 | for (i = 0; i < bs; i++) { |
796 | ipad[i] ^= 0x36; | 796 | ipad[i] ^= 0x36; |
797 | opad[i] ^= 0x5c; | 797 | opad[i] ^= 0x5c; |
798 | } | 798 | } |
799 | 799 | ||
800 | rc = crypto_shash_init(&desc.shash) ? : | 800 | rc = crypto_shash_init(&desc.shash) ? : |
801 | crypto_shash_update(&desc.shash, ipad, bs) ? : | 801 | crypto_shash_update(&desc.shash, ipad, bs) ? : |
802 | crypto_shash_export(&desc.shash, ipad) ? : | 802 | crypto_shash_export(&desc.shash, ipad) ? : |
803 | crypto_shash_init(&desc.shash) ? : | 803 | crypto_shash_init(&desc.shash) ? : |
804 | crypto_shash_update(&desc.shash, opad, bs) ? : | 804 | crypto_shash_update(&desc.shash, opad, bs) ? : |
805 | crypto_shash_export(&desc.shash, opad); | 805 | crypto_shash_export(&desc.shash, opad); |
806 | 806 | ||
807 | if (rc == 0) | 807 | if (rc == 0) |
808 | mv_hash_init_ivs(ctx, ipad, opad); | 808 | mv_hash_init_ivs(ctx, ipad, opad); |
809 | 809 | ||
810 | return rc; | 810 | return rc; |
811 | } | 811 | } |
812 | } | 812 | } |
813 | 813 | ||
814 | static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, | 814 | static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, |
815 | enum hash_op op, int count_add) | 815 | enum hash_op op, int count_add) |
816 | { | 816 | { |
817 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | 817 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; |
818 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 818 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
819 | struct crypto_shash *fallback_tfm = NULL; | 819 | struct crypto_shash *fallback_tfm = NULL; |
820 | struct crypto_shash *base_hash = NULL; | 820 | struct crypto_shash *base_hash = NULL; |
821 | int err = -ENOMEM; | 821 | int err = -ENOMEM; |
822 | 822 | ||
823 | ctx->op = op; | 823 | ctx->op = op; |
824 | ctx->count_add = count_add; | 824 | ctx->count_add = count_add; |
825 | 825 | ||
826 | /* Allocate a fallback and abort if it failed. */ | 826 | /* Allocate a fallback and abort if it failed. */ |
827 | fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, | 827 | fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, |
828 | CRYPTO_ALG_NEED_FALLBACK); | 828 | CRYPTO_ALG_NEED_FALLBACK); |
829 | if (IS_ERR(fallback_tfm)) { | 829 | if (IS_ERR(fallback_tfm)) { |
830 | printk(KERN_WARNING MV_CESA | 830 | printk(KERN_WARNING MV_CESA |
831 | "Fallback driver '%s' could not be loaded!\n", | 831 | "Fallback driver '%s' could not be loaded!\n", |
832 | fallback_driver_name); | 832 | fallback_driver_name); |
833 | err = PTR_ERR(fallback_tfm); | 833 | err = PTR_ERR(fallback_tfm); |
834 | goto out; | 834 | goto out; |
835 | } | 835 | } |
836 | ctx->fallback = fallback_tfm; | 836 | ctx->fallback = fallback_tfm; |
837 | 837 | ||
838 | if (base_hash_name) { | 838 | if (base_hash_name) { |
839 | /* Allocate a hash to compute the ipad/opad of hmac. */ | 839 | /* Allocate a hash to compute the ipad/opad of hmac. */ |
840 | base_hash = crypto_alloc_shash(base_hash_name, 0, | 840 | base_hash = crypto_alloc_shash(base_hash_name, 0, |
841 | CRYPTO_ALG_NEED_FALLBACK); | 841 | CRYPTO_ALG_NEED_FALLBACK); |
842 | if (IS_ERR(base_hash)) { | 842 | if (IS_ERR(base_hash)) { |
843 | printk(KERN_WARNING MV_CESA | 843 | printk(KERN_WARNING MV_CESA |
844 | "Base driver '%s' could not be loaded!\n", | 844 | "Base driver '%s' could not be loaded!\n", |
845 | base_hash_name); | 845 | base_hash_name); |
846 | err = PTR_ERR(base_hash); | 846 | err = PTR_ERR(base_hash); |
847 | goto err_bad_base; | 847 | goto err_bad_base; |
848 | } | 848 | } |
849 | } | 849 | } |
850 | ctx->base_hash = base_hash; | 850 | ctx->base_hash = base_hash; |
851 | 851 | ||
852 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 852 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
853 | sizeof(struct mv_req_hash_ctx) + | 853 | sizeof(struct mv_req_hash_ctx) + |
854 | crypto_shash_descsize(ctx->fallback)); | 854 | crypto_shash_descsize(ctx->fallback)); |
855 | return 0; | 855 | return 0; |
856 | err_bad_base: | 856 | err_bad_base: |
857 | crypto_free_shash(fallback_tfm); | 857 | crypto_free_shash(fallback_tfm); |
858 | out: | 858 | out: |
859 | return err; | 859 | return err; |
860 | } | 860 | } |
861 | 861 | ||
862 | static void mv_cra_hash_exit(struct crypto_tfm *tfm) | 862 | static void mv_cra_hash_exit(struct crypto_tfm *tfm) |
863 | { | 863 | { |
864 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 864 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
865 | 865 | ||
866 | crypto_free_shash(ctx->fallback); | 866 | crypto_free_shash(ctx->fallback); |
867 | if (ctx->base_hash) | 867 | if (ctx->base_hash) |
868 | crypto_free_shash(ctx->base_hash); | 868 | crypto_free_shash(ctx->base_hash); |
869 | } | 869 | } |
870 | 870 | ||
871 | static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm) | 871 | static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm) |
872 | { | 872 | { |
873 | return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0); | 873 | return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0); |
874 | } | 874 | } |
875 | 875 | ||
876 | static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) | 876 | static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) |
877 | { | 877 | { |
878 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); | 878 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); |
879 | } | 879 | } |
880 | 880 | ||
881 | irqreturn_t crypto_int(int irq, void *priv) | 881 | irqreturn_t crypto_int(int irq, void *priv) |
882 | { | 882 | { |
883 | u32 val; | 883 | u32 val; |
884 | 884 | ||
885 | val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); | 885 | val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); |
886 | if (!(val & SEC_INT_ACCEL0_DONE)) | 886 | if (!(val & SEC_INT_ACCEL0_DONE)) |
887 | return IRQ_NONE; | 887 | return IRQ_NONE; |
888 | 888 | ||
889 | val &= ~SEC_INT_ACCEL0_DONE; | 889 | val &= ~SEC_INT_ACCEL0_DONE; |
890 | writel(val, cpg->reg + FPGA_INT_STATUS); | 890 | writel(val, cpg->reg + FPGA_INT_STATUS); |
891 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); | 891 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); |
892 | BUG_ON(cpg->eng_st != ENGINE_BUSY); | 892 | BUG_ON(cpg->eng_st != ENGINE_BUSY); |
893 | cpg->eng_st = ENGINE_W_DEQUEUE; | 893 | cpg->eng_st = ENGINE_W_DEQUEUE; |
894 | wake_up_process(cpg->queue_th); | 894 | wake_up_process(cpg->queue_th); |
895 | return IRQ_HANDLED; | 895 | return IRQ_HANDLED; |
896 | } | 896 | } |
897 | 897 | ||
static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};

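/*
 * ahash definitions. CRYPTO_ALG_NEED_FALLBACK is set so that requests
 * the engine cannot process (cf. MAX_HW_HASH_SIZE) can be handed to the
 * software fallback allocated in mv_cra_hash_init().
 */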
static struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

static struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_hmac_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

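/*
 * Probe: map the register and SRAM resources, start the queue thread,
 * install the interrupt handler, unmask ACCEL0_DONE and register the
 * cipher and hash algorithms. Hash registration failures are non-fatal;
 * the has_sha1/has_hmac_sha1 flags record what actually registered.
 */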
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		/* NO_IRQ may be zero; never return 0 from a failed probe */
		ret = irq < 0 ? irq : -ENXIO;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			  cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

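/*
 * Remove: unregister whatever probe registered, stop the queue thread,
 * release the interrupt and clear the SRAM (which may still hold key
 * material) before unmapping.
 */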
static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	/* cp->sram is __iomem, so wipe it with memset_io rather than memset */
	memset_io(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

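/* Platform driver glue; matches devices named "mv_crypto". */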
static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);