crypto/crypto_engine.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n",
			ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
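/*
 * Example (not part of the original file): a minimal sketch of the
 * per-transform hooks that crypto_pump_requests() dispatches to above.
 * crypto_tfm_ctx() must yield a struct crypto_engine_ctx, so a driver
 * embeds one as the first member of its transform context. All names
 * prefixed "my_" are hypothetical.
 */
struct my_cipher_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first */
	u32 keylen;				/* driver-private state */
};

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/* Program the hardware from req->src/req->dst/req->cryptlen here. */
	pr_debug("%s: request of %u bytes\n", engine->name, req->cryptlen);

	return 0;	/* 0: accepted; the completion path finalizes it later */
}

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	struct my_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* This hypothetical hardware needs no (un)prepare step */
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = my_do_one_request;

	return 0;
}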
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
/**
 * crypto_transfer_request_to_engine - transfer one request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
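/*
 * Example (not part of the original file): a driver's skcipher .encrypt
 * callback typically just queues the request on the engine; the
 * -EINPROGRESS/-EBUSY return convention comes from crypto_enqueue_request()
 * via crypto_transfer_request() above. "struct my_dev" and the global
 * instance are hypothetical single-device scaffolding.
 */
struct my_dev {
	struct crypto_engine *engine;
	struct skcipher_request *cur_req;	/* request now on the hardware */
};

static struct my_dev *my_dev_instance;

static int my_skcipher_encrypt(struct skcipher_request *req)
{
	return crypto_transfer_skcipher_request_to_engine(my_dev_instance->engine,
							  req);
}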
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
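/*
 * Example (not part of the original file): the hardware completion path
 * (here a hypothetical threaded IRQ handler, reusing "struct my_dev" from
 * the sketch above) must report the result through the engine rather than
 * calling req->base.complete() directly, so that cur_req is cleared and
 * the pump is kicked for the next request.
 */
static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_dev *dd = data;
	struct skcipher_request *req = dd->cur_req;

	dd->cur_req = NULL;
	crypto_finalize_skcipher_request(dd->engine, req, 0);

	return IRQ_HANDLED;
}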
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while so the pending requests can be pumped out of the
	 * engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}
	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
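/*
 * Example (not part of the original file): the typical engine lifecycle in
 * a driver's probe()/remove(), reusing the hypothetical "struct my_dev"
 * from the sketches above; error handling is trimmed to the engine calls.
 */
static int my_probe(struct platform_device *pdev)
{
	struct my_dev *dd;
	int ret;

	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	/* rt = false: no need for a SCHED_FIFO pump thread here */
	dd->engine = crypto_engine_alloc_init(&pdev->dev, false);
	if (!dd->engine)
		return -ENOMEM;

	ret = crypto_engine_start(dd->engine);
	if (ret) {
		crypto_engine_exit(dd->engine);
		return ret;
	}

	platform_set_drvdata(pdev, dd);
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	struct my_dev *dd = platform_get_drvdata(pdev);

	/* Stops the engine (may return -EBUSY) and frees the kworker */
	return crypto_engine_exit(dd->engine);
}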