/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
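/*
 * In short: this file registers the "cryptd" template.  Wrapping an
 * existing synchronous blkcipher or hash as "cryptd(<name>)" produces an
 * asynchronous version of it: requests are placed on a per-CPU
 * crypto_queue and completed later by cryptd_queue_worker() running on
 * the kcrypto_wq workqueue.
 */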
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        struct crypto_hash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);
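/*
 * Data layout in brief: one cryptd_queue (the static "queue" defined
 * further down) holds a per-CPU cryptd_cpu_queue, each pairing a
 * crypto_queue of pending requests with the work_struct that drains it.
 * The *_ctx structures carry the wrapped synchronous child transform;
 * the *_request_ctx structures only save the caller's completion
 * callback while a request is queued.
 */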
static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}
/* Queue a request on the current CPU's queue and kick that CPU's worker. */
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/* Called in workqueue context, does one unit of real crypto work (via
 * req->complete) and reschedules itself if there is more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /* Only handle one request at a time to avoid hogging the crypto
         * workqueue.  preempt_disable/enable is used to prevent
         * being preempted by cryptd_enqueue_request(). */
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}
static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

/* Run the wrapped synchronous cipher, then invoke the caller's original
 * completion callback. */
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct blkcipher_desc desc;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t complete)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
                                                     struct cryptd_queue *queue)
{
        struct crypto_instance *inst;
        struct cryptd_instance_ctx *ctx;
        int err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst) {
                inst = ERR_PTR(-ENOMEM);
                goto out;
        }

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        ctx = crypto_instance_ctx(inst);
        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        ctx->queue = queue;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return inst;

out_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}
static struct crypto_instance *cryptd_alloc_blkcipher(
        struct rtattr **tb, struct cryptd_queue *queue)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = cryptd_alloc_instance(alg, queue);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_hash *cipher;

        cipher = crypto_spawn_hash(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ahash.reqsize =
                sizeof(struct cryptd_hash_request_ctx);
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_hash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_hash *child = ctx->child;
        int err;

        crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
                                     CRYPTO_TFM_REQ_MASK);
        err = crypto_hash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);

        return err;
}
static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t complete)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->init(&desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->update(&desc, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->final(&desc, req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->digest(&desc, req->src, req->nbytes,
                                             req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
static struct crypto_instance *cryptd_alloc_hash(
        struct rtattr **tb, struct cryptd_queue *queue)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
                                  CRYPTO_ALG_TYPE_HASH_MASK);
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = cryptd_alloc_instance(alg, queue);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_ahash_type;

        inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
        inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.cra_init = cryptd_hash_init_tfm;
        inst->alg.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.cra_ahash.init   = cryptd_hash_init_enqueue;
        inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
        inst->alg.cra_ahash.final  = cryptd_hash_final_enqueue;
        inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
        inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}
static struct cryptd_queue queue;

static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_alloc_blkcipher(tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_alloc_hash(tb, &queue);
        }

        return ERR_PTR(-EINVAL);
}
static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
}
static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .alloc = cryptd_alloc,
        .free = cryptd_free,
        .module = THIS_MODULE,
};
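/*
 * Instances of this template are created on demand whenever an algorithm
 * named "cryptd(<something>)" is requested through the crypto API;
 * cryptd_alloc() above picks the blkcipher or hash variant according to
 * the wrapped algorithm's type.  cryptd_alloc_ablkcipher() below is the
 * exported convenience helper for doing this from other modules.
 */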
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ablkcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ablkcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ablkcipher_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
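/*
 * Illustrative usage sketch of the exported helpers above (not taken from
 * this file; the algorithm name "cbc(aes-generic)" is only an example):
 *
 *	struct cryptd_ablkcipher *ctr;
 *
 *	ctr = cryptd_alloc_ablkcipher("cbc(aes-generic)", 0, 0);
 *	if (IS_ERR(ctr))
 *		return PTR_ERR(ctr);
 *
 *	&ctr->base can then be used as an ordinary async blkcipher, while
 *	cryptd_ablkcipher_child(ctr) exposes the wrapped synchronous tfm,
 *	e.g. for operating on it directly from contexts where the async
 *	path is not wanted.  Release the handle with:
 *
 *	cryptd_free_ablkcipher(ctr);
 */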
static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");