/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define CRYPTD_MAX_QLEN 100
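
/*
 * Daemon state: a lock-protected request queue drained by a dedicated
 * kthread.  The mutex serialises queue draining against transform
 * teardown (see cryptd_blkcipher_exit_tfm()).
 */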
struct cryptd_state {
	spinlock_t lock;
	struct mutex mutex;
	struct crypto_queue queue;
	struct task_struct *task;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_state *state;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};
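
/* Walk back from a transform to the daemon state of its instance. */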
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->state;
}
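
/*
 * Set the key on the underlying synchronous blkcipher, mirroring the
 * request flags from the async parent down to the child and the result
 * flags (e.g. a bad key length) back up to the parent.
 */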
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}
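
/*
 * Runs in cryptd_thread() context: perform the actual (synchronous)
 * cipher operation, then invoke the caller's original completion
 * handler.  Completions are normally delivered from softirq context,
 * so BHs are disabled around the callback to preserve that environment.
 */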
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS)) {
		rctx->complete(&req->base, err);
		return;
	}

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

	local_bh_disable();
	req->base.complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}
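
/*
 * Queue a request for the daemon: stash the caller's completion handler
 * in the request context, substitute the encrypt/decrypt worker as the
 * completion, and wake the kthread.
 */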
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ablkcipher_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}
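
/* Instantiate the underlying synchronous blkcipher for a new tfm. */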
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}
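
/*
 * A tfm must not be freed while it still has requests queued; the
 * mutex keeps the check atomic with respect to the daemon thread.
 */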
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ablkcipher_tfm_in_queue(&state->queue,
					 __crypto_ablkcipher_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_blkcipher(ctx->child);
}
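
/*
 * Build a "cryptd(...)" instance around @alg.  The +50 priority bump
 * makes the async wrapper win over the plain synchronous cipher when
 * a caller asks for an async algorithm by the same cra_name.
 */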
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct cryptd_instance_ctx *ctx;
	int err;

	/* kzalloc() returns NULL on failure, never an ERR_PTR value. */
	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	ctx->state = state;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
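
/* Wrap a synchronous blkcipher in an async (ablkcipher) cryptd instance. */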
static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(alg))
		return ERR_PTR(PTR_ERR(alg));

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static struct cryptd_state state;

static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_PTR(PTR_ERR(algt));

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &state);
	}

	return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.alloc = cryptd_alloc,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
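
/*
 * Usage sketch (not part of this file): once the template is
 * registered, a user can ask for the async wrapper by name.  Error
 * handling is omitted and "cbc(aes)" is assumed to be available:
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cryptd(cbc(aes))", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_ablkcipher(tfm);
 */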
static inline int cryptd_create_thread(struct cryptd_state *state,
				       int (*fn)(void *data), const char *name)
{
	spin_lock_init(&state->lock);
	mutex_init(&state->mutex);
	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);

	state->task = kthread_run(fn, state, name);
	if (IS_ERR(state->task))
		return PTR_ERR(state->task);

	return 0;
}

static inline void cryptd_stop_thread(struct cryptd_state *state)
{
	BUG_ON(state->queue.qlen);
	kthread_stop(state->task);
}
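
/*
 * The daemon loop: sleep until woken, pop one request per iteration
 * under the mutex, notify any backlogged request that it is now in
 * progress, and run the substituted completion handler (which performs
 * the actual crypto and then calls the original completion).
 */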
static int cryptd_thread(void *data)
{
	struct cryptd_state *state = data;
	int stop;

	current->flags |= PF_NOFREEZE;

	do {
		struct crypto_async_request *req, *backlog;

		mutex_lock(&state->mutex);
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_bh(&state->lock);
		backlog = crypto_get_backlog(&state->queue);
		req = crypto_dequeue_request(&state->queue);
		spin_unlock_bh(&state->lock);

		stop = kthread_should_stop();

		if (stop || req) {
			__set_current_state(TASK_RUNNING);
			if (req) {
				if (backlog)
					backlog->complete(backlog,
							  -EINPROGRESS);
				req->complete(req, 0);
			}
		}

		mutex_unlock(&state->mutex);

		schedule();
	} while (!stop);

	return 0;
}

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		kthread_stop(state.task);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_stop_thread(&state);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");