crypto: mv_cesa - Enqueue generic async requests
drivers/crypto/mv_cesa.c

/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"

/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current crypt process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @total_req_bytes:	total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int sg_dst_left;
	int dst_start;
	int total_req_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}

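/*
 * Copy @len bytes from the request's source scatterlist into @dbuf (the
 * SRAM input area), advancing the sg iterator state in @p across
 * scatterlist entries as they are exhausted.
 */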
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;
			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}

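/*
 * Copy the next chunk of the current request into the SRAM input buffer.
 * The chunk is limited to what still fits into the SRAM (cpg->max_req_size)
 * and to what is left of the request.
 */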
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	p->crypt_len =
	    min(p->hw_nbytes - p->total_req_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
			p->crypt_len);
}

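/*
 * Program the accelerator for the current request: build the security
 * accelerator descriptor (cipher mode, direction, key and, for CBC, the IV
 * on the first block), copy the next input chunk into SRAM and start the
 * engine. Completion is signalled by the SEC_INT_ACCEL0_DONE interrupt.
 */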
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

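/*
 * Called from the queue thread once the engine has signalled completion of
 * a chunk: copy the output from SRAM into the destination scatterlist, then
 * either program the next chunk or finish and complete the request.
 */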
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	int need_copy_len = cpg->p.crypt_len;
	int sram_offset = 0;

	cpg->p.total_req_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

		memcpy(buf,
		       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
		       dst_copy);
		sram_offset += dst_copy;
		cpg->p.sg_dst_left -= dst_copy;
		need_copy_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (need_copy_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.total_req_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		mv_process_current_q(0);
	} else {
		sg_miter_stop(&cpg->p.src_sg_it);
		sg_miter_stop(&cpg->p.dst_sg_it);
		mv_crypto_algo_completion();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (1) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

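/*
 * Worker thread: drains the chunk completed by the engine, then dequeues
 * the next request from the crypto queue once the engine is idle, notifies
 * any backlogged request and starts processing. It sleeps when there is
 * nothing to do and is woken by mv_handle_req() and the interrupt handler.
 */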
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_thread;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_unmap_sram;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_reg;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;
	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_thread:
	free_irq(irq, cp);
err_reg:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");
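
For context, the sketch below shows how another kernel user of the same era might exercise the "cbc(aes)" ablkcipher this driver registers, using the legacy ablkcipher API. It is illustrative only and not part of mv_cesa.c: the names example_result, example_complete and example_cbc_aes_encrypt are hypothetical, and error handling is reduced to the essentials (synchronous wait on the async completion).

/* Minimal, hypothetical caller of the "cbc(aes)" ablkcipher. */
#include <crypto/aes.h>
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_result {
	struct completion completion;
	int err;
};

/* Async completion callback; -EINPROGRESS means "moved off the backlog". */
static void example_complete(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Encrypt @len bytes of @buf in place; @len must be a multiple of
 * AES_BLOCK_SIZE and @iv must hold AES_BLOCK_SIZE bytes. */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct example_result res;
	struct scatterlist sg;
	int ret;

	/* Resolves to "mv-cbc-aes" when this driver is loaded and wins
	 * the priority comparison against the software implementation. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_complete, &res);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* The driver completes the request from its queue thread. */
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}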