/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* By now the request has been dequeued successfully; process it */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			dev_err(engine->dev, "failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			dev_err(engine->dev, "failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		dev_err(engine->dev, "failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to queue the request pump after enqueueing
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);

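/*
 * Example usage (illustrative sketch only, not part of this file): a
 * driver's ablkcipher .encrypt/.decrypt entry point typically just hands
 * the request over to the engine and returns the enqueue status. The
 * foo_* names below are hypothetical.
 *
 *	static int foo_aes_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct foo_dev *fdev = foo_dev_from_request(req);
 *
 *		return crypto_transfer_cipher_request_to_engine(fdev->engine,
 *								req);
 *	}
 */
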
/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to queue the request pump after enqueueing
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

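/*
 * Example usage (sketch, hypothetical driver): the hash path mirrors the
 * cipher path above; an ahash entry point simply forwards the request to
 * the same engine queue.
 *
 *	static int foo_sha_digest(struct ahash_request *req)
 *	{
 *		struct foo_dev *fdev = foo_dev_from_ahash_request(req);
 *
 *		return crypto_transfer_hash_request_to_engine(fdev->engine,
 *							      req);
 *	}
 */
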
/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);

/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

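/*
 * Example usage (sketch, hypothetical driver): when the hardware signals
 * that the in-flight request is done, e.g. from the driver's interrupt
 * handler, the driver reports the result to the engine; the engine then
 * unprepares the request, calls its completion callback and reschedules
 * the pump for the next queued request. All foo_* names are assumptions.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_dev *fdev = data;
 *		int err = foo_read_hw_status(fdev) ? -EIO : 0;
 *
 *		crypto_finalize_cipher_request(fdev->engine, fdev->cur_breq,
 *					       err);
 *		return IRQ_HANDLED;
 *	}
 */
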
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else an error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else an error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * a while for the pump thread to drain the queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it
 * @dev: the device attached with one hardware engine
 * @rt: whether the request pump thread should run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

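/*
 * Example usage (sketch, hypothetical driver probe): allocate the engine,
 * install the per-operation hooks declared in crypto/engine.h, then start
 * the request pump. The foo_* callbacks are assumptions for illustration.
 *
 *	fdev->engine = crypto_engine_alloc_init(dev, true);
 *	if (!fdev->engine)
 *		return -ENOMEM;
 *
 *	fdev->engine->prepare_crypt_hardware = foo_prepare_hw;
 *	fdev->engine->unprepare_crypt_hardware = foo_unprepare_hw;
 *	fdev->engine->cipher_one_request = foo_cipher_one_request;
 *
 *	ret = crypto_engine_start(fdev->engine);
 *	if (ret) {
 *		crypto_engine_exit(fdev->engine);
 *		return ret;
 *	}
 */
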
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else an error code from crypto_engine_stop().
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");