Commit 5313231ac9a4334ded1dc205aac60dd63c62ff1d
Committed by
Herbert Xu
1 parent
e13a79acf9
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
crypto: nx - Moving NX-AES-XCBC to be processed logic
The previous limits were estimated locally in a single step based on bound values; however, this was not correct, since for certain scatterlists the function nx_build_sg_lists consumed more sg entries than were allocated, causing memory corruption and crashes. This patch removes the old logic and moves it into nx_build_sg_lists in order to build a correct nx_sg list using the correct sg_max limit and bounds. Signed-off-by: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Showing 1 changed file with 63 additions and 18 deletions Side-by-side Diff
drivers/crypto/nx/nx-aes-xcbc.c
... | ... | @@ -75,6 +75,7 @@ |
75 | 75 | u8 keys[2][AES_BLOCK_SIZE]; |
76 | 76 | u8 key[32]; |
77 | 77 | int rc = 0; |
78 | + int len; | |
78 | 79 | |
79 | 80 | /* Change to ECB mode */ |
80 | 81 | csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; |
81 | 82 | |
82 | 83 | |
83 | 84 | |
... | ... | @@ -86,11 +87,20 @@ |
86 | 87 | memset(keys[0], 0x01, sizeof(keys[0])); |
87 | 88 | memset(keys[1], 0x03, sizeof(keys[1])); |
88 | 89 | |
90 | + len = sizeof(keys); | |
89 | 91 | /* Generate K1 and K3 encrypting the patterns */ |
90 | - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys), | |
92 | + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len, | |
91 | 93 | nx_ctx->ap->sglen); |
92 | - out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys), | |
94 | + | |
95 | + if (len != sizeof(keys)) | |
96 | + return -EINVAL; | |
97 | + | |
98 | + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len, | |
93 | 99 | nx_ctx->ap->sglen); |
100 | + | |
101 | + if (len != sizeof(keys)) | |
102 | + return -EINVAL; | |
103 | + | |
94 | 104 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
95 | 105 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
96 | 106 | |
97 | 107 | |
98 | 108 | |
99 | 109 | |
... | ... | @@ -103,12 +113,23 @@ |
103 | 113 | /* XOr K3 with the padding for a 0 length message */ |
104 | 114 | keys[1][0] ^= 0x80; |
105 | 115 | |
116 | + len = sizeof(keys[1]); | |
117 | + | |
106 | 118 | /* Encrypt the final result */ |
107 | 119 | memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE); |
108 | - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]), | |
120 | + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len, | |
109 | 121 | nx_ctx->ap->sglen); |
110 | - out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE, | |
122 | + | |
123 | + if (len != sizeof(keys[1])) | |
124 | + return -EINVAL; | |
125 | + | |
126 | + len = AES_BLOCK_SIZE; | |
127 | + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, | |
111 | 128 | nx_ctx->ap->sglen); |
129 | + | |
130 | + if (len != AES_BLOCK_SIZE) | |
131 | + return -EINVAL; | |
132 | + | |
112 | 133 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
113 | 134 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
114 | 135 | |
... | ... | @@ -133,6 +154,7 @@ |
133 | 154 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
134 | 155 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
135 | 156 | struct nx_sg *out_sg; |
157 | + int len; | |
136 | 158 | |
137 | 159 | nx_ctx_init(nx_ctx, HCOP_FC_AES); |
138 | 160 | |
139 | 161 | |
... | ... | @@ -144,8 +166,13 @@ |
144 | 166 | memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); |
145 | 167 | memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); |
146 | 168 | |
169 | + len = AES_BLOCK_SIZE; | |
147 | 170 | out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, |
148 | - AES_BLOCK_SIZE, nx_ctx->ap->sglen); | |
171 | + &len, nx_ctx->ap->sglen); | |
172 | + | |
173 | + if (len != AES_BLOCK_SIZE) | |
174 | + return -EINVAL; | |
175 | + | |
149 | 176 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
150 | 177 | |
151 | 178 | return 0; |
152 | 179 | |
... | ... | @@ -159,10 +186,11 @@ |
159 | 186 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
160 | 187 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
161 | 188 | struct nx_sg *in_sg; |
162 | - u32 to_process, leftover, total; | |
163 | - u32 max_sg_len; | |
189 | + u32 to_process = 0, leftover, total; | |
190 | + unsigned int max_sg_len; | |
164 | 191 | unsigned long irq_flags; |
165 | 192 | int rc = 0; |
193 | + int data_len; | |
166 | 194 | |
167 | 195 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
168 | 196 | |
169 | 197 | |
170 | 198 | |
171 | 199 | |
... | ... | @@ -180,17 +208,15 @@ |
180 | 208 | } |
181 | 209 | |
182 | 210 | in_sg = nx_ctx->in_sg; |
183 | - max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), | |
211 | + max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), | |
184 | 212 | nx_ctx->ap->sglen); |
213 | + max_sg_len = min_t(u64, max_sg_len, | |
214 | + nx_ctx->ap->databytelen/NX_PAGE_SIZE); | |
185 | 215 | |
186 | 216 | do { |
187 | - | |
188 | - /* to_process: the AES_BLOCK_SIZE data chunk to process in this | |
189 | - * update */ | |
190 | - to_process = min_t(u64, total, nx_ctx->ap->databytelen); | |
191 | - to_process = min_t(u64, to_process, | |
192 | - NX_PAGE_SIZE * (max_sg_len - 1)); | |
217 | + to_process = total - to_process; | |
193 | 218 | to_process = to_process & ~(AES_BLOCK_SIZE - 1); |
219 | + | |
194 | 220 | leftover = total - to_process; |
195 | 221 | |
196 | 222 | /* the hardware will not accept a 0 byte operation for this |
197 | 223 | |
198 | 224 | |
199 | 225 | |
200 | 226 | |
201 | 227 | |
... | ... | @@ -204,15 +230,24 @@ |
204 | 230 | } |
205 | 231 | |
206 | 232 | if (sctx->count) { |
233 | + data_len = sctx->count; | |
207 | 234 | in_sg = nx_build_sg_list(nx_ctx->in_sg, |
208 | 235 | (u8 *) sctx->buffer, |
209 | - sctx->count, | |
236 | + &data_len, | |
210 | 237 | max_sg_len); |
238 | + if (data_len != sctx->count) | |
239 | + return -EINVAL; | |
211 | 240 | } |
241 | + | |
242 | + data_len = to_process - sctx->count; | |
212 | 243 | in_sg = nx_build_sg_list(in_sg, |
213 | 244 | (u8 *) data, |
214 | - to_process - sctx->count, | |
245 | + &data_len, | |
215 | 246 | max_sg_len); |
247 | + | |
248 | + if (data_len != to_process - sctx->count) | |
249 | + return -EINVAL; | |
250 | + | |
216 | 251 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * |
217 | 252 | sizeof(struct nx_sg); |
218 | 253 | |
... | ... | @@ -263,6 +298,7 @@ |
263 | 298 | struct nx_sg *in_sg, *out_sg; |
264 | 299 | unsigned long irq_flags; |
265 | 300 | int rc = 0; |
301 | + int len; | |
266 | 302 | |
267 | 303 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
268 | 304 | |
269 | 305 | |
270 | 306 | |
... | ... | @@ -285,10 +321,19 @@ |
285 | 321 | * this is not an intermediate operation */ |
286 | 322 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; |
287 | 323 | |
324 | + len = sctx->count; | |
288 | 325 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, |
289 | - sctx->count, nx_ctx->ap->sglen); | |
290 | - out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE, | |
326 | + &len, nx_ctx->ap->sglen); | |
327 | + | |
328 | + if (len != sctx->count) | |
329 | + return -EINVAL; | |
330 | + | |
331 | + len = AES_BLOCK_SIZE; | |
332 | + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, | |
291 | 333 | nx_ctx->ap->sglen); |
334 | + | |
335 | + if (len != AES_BLOCK_SIZE) | |
336 | + return -EINVAL; | |
292 | 337 | |
293 | 338 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
294 | 339 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |