drivers/crypto/atmel-aes.c

/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE         1
#define CFB16_BLOCK_SIZE        2
#define CFB32_BLOCK_SIZE        4
#define CFB64_BLOCK_SIZE        8

/* AES flags */
#define AES_FLAGS_MODE_MASK     0x03ff
#define AES_FLAGS_ENCRYPT       BIT(0)
#define AES_FLAGS_CBC           BIT(1)
#define AES_FLAGS_CFB           BIT(2)
#define AES_FLAGS_CFB8          BIT(3)
#define AES_FLAGS_CFB16         BIT(4)
#define AES_FLAGS_CFB32         BIT(5)
#define AES_FLAGS_CFB64         BIT(6)
#define AES_FLAGS_CFB128        BIT(7)
#define AES_FLAGS_OFB           BIT(8)
#define AES_FLAGS_CTR           BIT(9)

#define AES_FLAGS_INIT          BIT(16)
#define AES_FLAGS_DMA           BIT(17)
#define AES_FLAGS_BUSY          BIT(18)
#define AES_FLAGS_FAST          BIT(19)

#define ATMEL_AES_QUEUE_LENGTH  50

#define ATMEL_AES_DMA_THRESHOLD 16

struct atmel_aes_caps {
        bool    has_dualbuff;
        bool    has_cfb64;
        u32     max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
        struct atmel_aes_dev    *dd;

        int     keylen;
        u32     key[AES_KEYSIZE_256 / sizeof(u32)];

        u16     block_size;
};

struct atmel_aes_reqctx {
        unsigned long   mode;
};

struct atmel_aes_dma {
        struct dma_chan         *chan;
        struct dma_slave_config dma_conf;
};

struct atmel_aes_dev {
        struct list_head        list;
        unsigned long           phys_base;
        void __iomem            *io_base;

        struct atmel_aes_ctx    *ctx;
        struct device           *dev;
        struct clk              *iclk;
        int                     irq;

        unsigned long           flags;
        int                     err;

        spinlock_t              lock;
        struct crypto_queue     queue;

        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        struct ablkcipher_request       *req;
        size_t                  total;

        struct scatterlist      *in_sg;
        unsigned int            nb_in_sg;
        size_t                  in_offset;
        struct scatterlist      *out_sg;
        unsigned int            nb_out_sg;
        size_t                  out_offset;

        size_t                  bufcnt;
        size_t                  buflen;
        size_t                  dma_size;

        void                    *buf_in;
        int                     dma_in;
        dma_addr_t              dma_addr_in;
        struct atmel_aes_dma    dma_lch_in;

        void                    *buf_out;
        int                     dma_out;
        dma_addr_t              dma_addr_out;
        struct atmel_aes_dma    dma_lch_out;

        struct atmel_aes_caps   caps;

        u32                     hw_version;
};

struct atmel_aes_drv {
        struct list_head        dev_list;
        spinlock_t              lock;
};

static struct atmel_aes_drv atmel_aes = {
        .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

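/*
 * Walk the scatterlist and count how many entries are needed to cover
 * the request payload (req->nbytes).
 */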
static int atmel_aes_sg_length(struct ablkcipher_request *req,
                        struct scatterlist *sg)
{
        unsigned int total = req->nbytes;
        int sg_nb;
        unsigned int len;
        struct scatterlist *sg_list;

        sg_nb = 0;
        sg_list = sg;
        total = req->nbytes;

        while (total) {
                len = min(sg_list->length, total);

                sg_nb++;
                total -= len;

                sg_list = sg_next(sg_list);
                if (!sg_list)
                        total = 0;
        }

        return sg_nb;
}

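/*
 * Copy between a scatterlist and a linear buffer, at most min(buflen, total)
 * bytes.  Advances *sg and *offset as it goes; 'out' selects the direction,
 * as with scatterwalk_map_and_copy().  Returns the number of bytes copied.
 */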
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
                        void *buf, size_t buflen, size_t total, int out)
{
        unsigned int count, off = 0;

        while (buflen && total) {
                count = min((*sg)->length - *offset, total);
                count = min(count, buflen);

                if (!count)
                        return off;

                scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

                off += count;
                buflen -= count;
                *offset += count;
                total -= count;

                if (*offset == (*sg)->length) {
                        *sg = sg_next(*sg);
                        if (*sg)
                                *offset = 0;
                        else
                                total = 0;
                }
        }

        return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
        return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
                                   u32 offset, u32 value)
{
        writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
                             u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                *value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
                              u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                atmel_aes_write(dd, offset, *value);
}

static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
        struct atmel_aes_dev *aes_dd = NULL;
        struct atmel_aes_dev *tmp;

        spin_lock_bh(&atmel_aes.lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
                        aes_dd = tmp;
                        break;
                }
                ctx->dd = aes_dd;
        } else {
                aes_dd = ctx->dd;
        }

        spin_unlock_bh(&atmel_aes.lock);

        return aes_dd;
}

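/*
 * Enable the peripheral clock and, on first use, soft-reset the IP and
 * program the mode register.  The 0xE written to the CKEY field appears
 * to be the fixed countermeasure-key value that Atmel's documentation
 * requires for AES_MR writes to take effect.
 */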
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
        clk_prepare_enable(dd->iclk);

        if (!(dd->flags & AES_FLAGS_INIT)) {
                atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
                atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
                dd->flags |= AES_FLAGS_INIT;
                dd->err = 0;
        }

        return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
        return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
        atmel_aes_hw_init(dd);

        dd->hw_version = atmel_aes_get_version(dd);

        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

        clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
        struct ablkcipher_request *req = dd->req;

        clk_disable_unprepare(dd->iclk);
        dd->flags &= ~AES_FLAGS_BUSY;

        req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
        struct atmel_aes_dev *dd = data;

        /* dma_lch_out - completed */
        tasklet_schedule(&dd->done_task);
}

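/*
 * Kick one DMA transfer of 'length' bytes: bus widths and burst sizes are
 * derived from the CFB segment size, then one slave descriptor is queued
 * per direction, with the completion callback attached to the output
 * (device-to-memory) channel.
 */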
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
                dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
        struct scatterlist sg[2];
        struct dma_async_tx_descriptor *in_desc, *out_desc;

        dd->dma_size = length;

        dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                   DMA_TO_DEVICE);
        dma_sync_single_for_device(dd->dev, dma_addr_out, length,
                                   DMA_FROM_DEVICE);

        if (dd->flags & AES_FLAGS_CFB8) {
                dd->dma_lch_in.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_1_BYTE;
                dd->dma_lch_out.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_1_BYTE;
        } else if (dd->flags & AES_FLAGS_CFB16) {
                dd->dma_lch_in.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_2_BYTES;
                dd->dma_lch_out.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_2_BYTES;
        } else {
                dd->dma_lch_in.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
                dd->dma_lch_out.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
        }

        if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
                        AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
                dd->dma_lch_in.dma_conf.src_maxburst = 1;
                dd->dma_lch_in.dma_conf.dst_maxburst = 1;
                dd->dma_lch_out.dma_conf.src_maxburst = 1;
                dd->dma_lch_out.dma_conf.dst_maxburst = 1;
        } else {
                dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
        }

        dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
        dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

        dd->flags |= AES_FLAGS_DMA;

        sg_init_table(&sg[0], 1);
        sg_dma_address(&sg[0]) = dma_addr_in;
        sg_dma_len(&sg[0]) = length;

        sg_init_table(&sg[1], 1);
        sg_dma_address(&sg[1]) = dma_addr_out;
        sg_dma_len(&sg[1]) = length;

        in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
                                1, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!in_desc)
                return -EINVAL;

        out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
                                1, DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!out_desc)
                return -EINVAL;

        out_desc->callback = atmel_aes_dma_callback;
        out_desc->callback_param = dd;

        dmaengine_submit(out_desc);
        dma_async_issue_pending(dd->dma_lch_out.chan);

        dmaengine_submit(in_desc);
        dma_async_issue_pending(dd->dma_lch_in.chan);

        return 0;
}

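/*
 * PIO path for requests at or below ATMEL_AES_DMA_THRESHOLD: the input is
 * linearized into buf_in and written to the IDATAR registers; completion
 * is signalled by the DATARDY interrupt.
 */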
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
        dd->flags &= ~AES_FLAGS_DMA;

        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
                                dd->dma_size, DMA_TO_DEVICE);
        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                                dd->dma_size, DMA_FROM_DEVICE);

        /* use cache buffers */
        dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
        if (!dd->nb_in_sg)
                return -EINVAL;

        dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
        if (!dd->nb_out_sg)
                return -EINVAL;

        dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
                                       dd->buf_in, dd->total);
        if (!dd->bufcnt)
                return -EINVAL;

        dd->total -= dd->bufcnt;

        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
        atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
                          dd->bufcnt >> 2);

        return 0;
}

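/*
 * Start the next DMA chunk.  When both scatterlists are word-aligned and
 * a whole number of blocks, the "fast" path maps them directly for DMA;
 * otherwise data is staged through the pre-mapped bounce buffers.
 */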
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
        int err, fast = 0, in, out;
        size_t count;
        dma_addr_t addr_in, addr_out;

        if ((!dd->in_offset) && (!dd->out_offset)) {
                /* check for alignment */
                in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
                        IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
                out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
                        IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
                fast = in && out;

                if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
                        fast = 0;
        }

        if (fast) {
                count = min(dd->total, sg_dma_len(dd->in_sg));
                count = min(count, sg_dma_len(dd->out_sg));

                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, 1,
                                 DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1,
                                     DMA_TO_DEVICE);
                        return -EINVAL;
                }

                addr_in = sg_dma_address(dd->in_sg);
                addr_out = sg_dma_address(dd->out_sg);

                dd->flags |= AES_FLAGS_FAST;

        } else {
                dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
                                        dd->dma_size, DMA_TO_DEVICE);

                /* use cache buffers */
                count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
                                dd->buf_in, dd->buflen, dd->total, 0);

                addr_in = dd->dma_addr_in;
                addr_out = dd->dma_addr_out;

                dd->flags &= ~AES_FLAGS_FAST;
        }

        dd->total -= count;

        err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

        if (err && (dd->flags & AES_FLAGS_FAST)) {
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
        }

        return err;
}

static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
        int err;
        u32 valcr = 0, valmr = 0;

        err = atmel_aes_hw_init(dd);
        if (err)
                return err;

        /* MR register must be set before IV registers */
        if (dd->ctx->keylen == AES_KEYSIZE_128)
                valmr |= AES_MR_KEYSIZE_128;
        else if (dd->ctx->keylen == AES_KEYSIZE_192)
                valmr |= AES_MR_KEYSIZE_192;
        else
                valmr |= AES_MR_KEYSIZE_256;

        if (dd->flags & AES_FLAGS_CBC) {
                valmr |= AES_MR_OPMOD_CBC;
        } else if (dd->flags & AES_FLAGS_CFB) {
                valmr |= AES_MR_OPMOD_CFB;
                if (dd->flags & AES_FLAGS_CFB8)
                        valmr |= AES_MR_CFBS_8b;
                else if (dd->flags & AES_FLAGS_CFB16)
                        valmr |= AES_MR_CFBS_16b;
                else if (dd->flags & AES_FLAGS_CFB32)
                        valmr |= AES_MR_CFBS_32b;
                else if (dd->flags & AES_FLAGS_CFB64)
                        valmr |= AES_MR_CFBS_64b;
                else if (dd->flags & AES_FLAGS_CFB128)
                        valmr |= AES_MR_CFBS_128b;
        } else if (dd->flags & AES_FLAGS_OFB) {
                valmr |= AES_MR_OPMOD_OFB;
        } else if (dd->flags & AES_FLAGS_CTR) {
                valmr |= AES_MR_OPMOD_CTR;
        } else {
                valmr |= AES_MR_OPMOD_ECB;
        }

        if (dd->flags & AES_FLAGS_ENCRYPT)
                valmr |= AES_MR_CYPHER_ENC;

        if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
                valmr |= AES_MR_SMOD_IDATAR0;
                if (dd->caps.has_dualbuff)
                        valmr |= AES_MR_DUALBUFF;
        } else {
                valmr |= AES_MR_SMOD_AUTO;
        }

        atmel_aes_write(dd, AES_CR, valcr);
        atmel_aes_write(dd, AES_MR, valmr);

        atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
                          dd->ctx->keylen >> 2);

        if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
             (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
            dd->req->info) {
                atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
        }

        return 0;
}

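/*
 * Enqueue the request (if any) and, unless the engine is already busy,
 * dequeue the next one, program the hardware and start the transfer in
 * DMA or PIO mode depending on the request size.
 */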
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
                                  struct ablkcipher_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct atmel_aes_ctx *ctx;
        struct atmel_aes_reqctx *rctx;
        unsigned long flags;
        int err, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
        if (dd->flags & AES_FLAGS_BUSY) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                dd->flags |= AES_FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= AES_FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
        dd->ctx = ctx;
        ctx->dd = dd;

        err = atmel_aes_write_ctrl(dd);
        if (!err) {
                if (dd->total > ATMEL_AES_DMA_THRESHOLD)
                        err = atmel_aes_crypt_dma_start(dd);
                else
                        err = atmel_aes_crypt_cpu_start(dd);
        }
        if (err) {
                /* aes_task will not finish it, so do it here */
                atmel_aes_finish_req(dd, err);
                tasklet_schedule(&dd->queue_task);
        }

        return ret;
}

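/*
 * Tear down after a DMA transfer: unmap the fast-path scatterlists, or
 * copy the bounce buffer back into the output scatterlist.
 */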
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
        int err = -EINVAL;
        size_t count;

        if (dd->flags & AES_FLAGS_DMA) {
                err = 0;
                if (dd->flags & AES_FLAGS_FAST) {
                        dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                } else {
                        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                                dd->dma_size, DMA_FROM_DEVICE);

                        /* copy data */
                        count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                        if (count != dd->dma_size) {
                                err = -EINVAL;
                                pr_err("not all data converted: %zu\n", count);
                        }
                }
        }

        return err;
}

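/*
 * Allocate the two one-page bounce buffers and map them for DMA; buflen
 * is rounded down to a whole number of AES blocks.
 */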
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
        int err = -ENOMEM;

        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buflen = PAGE_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf_in || !dd->buf_out) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }

        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                                         dd->buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                                          dd->buflen, DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }

        return 0;

err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
                         DMA_TO_DEVICE);
err_map_in:
err_alloc:
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
        if (err)
                pr_err("error: %d\n", err);
        return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
                         DMA_TO_DEVICE);
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
}

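/*
 * Common entry point for all modes: reject requests that are not a whole
 * number of blocks for the selected (CFB segment or AES) block size, then
 * hand the request to the queue.
 */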
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct atmel_aes_dev *dd;

        if (mode & AES_FLAGS_CFB8) {
                if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
                        pr_err("request size is not exact amount of CFB8 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB8_BLOCK_SIZE;
        } else if (mode & AES_FLAGS_CFB16) {
                if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
                        pr_err("request size is not exact amount of CFB16 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB16_BLOCK_SIZE;
        } else if (mode & AES_FLAGS_CFB32) {
                if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
                        pr_err("request size is not exact amount of CFB32 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB32_BLOCK_SIZE;
        } else if (mode & AES_FLAGS_CFB64) {
                if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
                        pr_err("request size is not exact amount of CFB64 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB64_BLOCK_SIZE;
        } else {
                if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                        pr_err("request size is not exact amount of AES blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = AES_BLOCK_SIZE;
        }

        dd = atmel_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return atmel_aes_handle_queue(dd, req);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
        struct at_dma_slave *sl = slave;

        if (sl && sl->dma_dev == chan->device->dev) {
                chan->private = sl;
                return true;
        } else {
                return false;
        }
}

static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
                              struct crypto_platform_data *pdata)
{
        int err = -ENOMEM;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Try to grab 2 DMA channels */
        dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
                        atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
        if (!dd->dma_lch_in.chan)
                goto err_dma_in;

        dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
        dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
                AES_IDATAR(0);
        dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_in.dma_conf.src_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_in.dma_conf.dst_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_in.dma_conf.device_fc = false;

        dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
                        atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
        if (!dd->dma_lch_out.chan)
                goto err_dma_out;

        dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
        dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
                AES_ODATAR(0);
        dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_out.dma_conf.src_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
        dd->dma_lch_out.dma_conf.dst_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_out.dma_conf.device_fc = false;

        return 0;

err_dma_out:
        dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
        dev_warn(dd->dev, "no DMA channel available\n");
        return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
        dma_release_channel(dd->dma_lch_in.chan);
        dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, 0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

        return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "atmel-ecb-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ecb_encrypt,
                .decrypt        = atmel_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "atmel-cbc-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cbc_encrypt,
                .decrypt        = atmel_aes_cbc_decrypt,
        }
},
{
        .cra_name               = "ofb(aes)",
        .cra_driver_name        = "atmel-ofb-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ofb_encrypt,
                .decrypt        = atmel_aes_ofb_decrypt,
        }
},
{
        .cra_name               = "cfb(aes)",
        .cra_driver_name        = "atmel-cfb-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb_encrypt,
                .decrypt        = atmel_aes_cfb_decrypt,
        }
},
{
        .cra_name               = "cfb32(aes)",
        .cra_driver_name        = "atmel-cfb32-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB32_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x3,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb32_encrypt,
                .decrypt        = atmel_aes_cfb32_decrypt,
        }
},
{
        .cra_name               = "cfb16(aes)",
        .cra_driver_name        = "atmel-cfb16-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB16_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x1,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb16_encrypt,
                .decrypt        = atmel_aes_cfb16_decrypt,
        }
},
{
        .cra_name               = "cfb8(aes)",
        .cra_driver_name        = "atmel-cfb8-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB8_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb8_encrypt,
                .decrypt        = atmel_aes_cfb8_decrypt,
        }
},
{
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "atmel-ctr-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ctr_encrypt,
                .decrypt        = atmel_aes_ctr_decrypt,
        }
},
};

static struct crypto_alg aes_cfb64_alg = {
        .cra_name               = "cfb64(aes)",
        .cra_driver_name        = "atmel-cfb64-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB64_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x7,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb64_encrypt,
                .decrypt        = atmel_aes_cfb64_decrypt,
        }
};

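/*
 * done_task runs after each completed transfer: for PIO it reads the
 * output registers back into the request, for DMA it finishes the current
 * chunk and restarts the engine while dd->total is non-zero.
 */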
static void atmel_aes_queue_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
        int err;

        if (!(dd->flags & AES_FLAGS_DMA)) {
                atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
                                 dd->bufcnt >> 2);

                if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
                                        dd->buf_out, dd->bufcnt))
                        err = 0;
                else
                        err = -EINVAL;

                goto cpu_end;
        }

        err = atmel_aes_crypt_dma_stop(dd);

        err = dd->err ? : err;

        if (dd->total && !err) {
                if (dd->flags & AES_FLAGS_FAST) {
                        dd->in_sg = sg_next(dd->in_sg);
                        dd->out_sg = sg_next(dd->out_sg);
                        if (!dd->in_sg || !dd->out_sg)
                                err = -EINVAL;
                }
                if (!err)
                        err = atmel_aes_crypt_dma_start(dd);
                if (!err)
                        return; /* DMA started. Not finishing. */
        }

cpu_end:
        atmel_aes_finish_req(dd, err);
        atmel_aes_handle_queue(dd, NULL);
}

static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
        struct atmel_aes_dev *aes_dd = dev_id;
        u32 reg;

        reg = atmel_aes_read(aes_dd, AES_ISR);
        if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
                atmel_aes_write(aes_dd, AES_IDR, reg);
                if (AES_FLAGS_BUSY & aes_dd->flags)
                        tasklet_schedule(&aes_dd->done_task);
                else
                        dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);
        if (dd->caps.has_cfb64)
                crypto_unregister_alg(&aes_cfb64_alg);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
        int err, i, j;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        if (dd->caps.has_cfb64) {
                err = crypto_register_alg(&aes_cfb64_alg);
                if (err)
                        goto err_aes_cfb64_alg;
        }

        return 0;

err_aes_cfb64_alg:
        i = ARRAY_SIZE(aes_algs);
err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

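/*
 * Derive the capability flags (dual buffer, CFB64, maximum DMA burst)
 * from the major IP revision read out of AES_HW_VERSION.
 */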
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
        dd->caps.has_dualbuff = 0;
        dd->caps.has_cfb64 = 0;
        dd->caps.max_burst_size = 1;

        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
        case 0x200:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x130:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x120:
                break;
        default:
                dev_warn(dd->dev,
                         "Unmanaged aes version, set minimum capabilities\n");
                break;
        }
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-aes" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct crypto_platform_data *pdata;

        if (!np) {
                dev_err(&pdev->dev, "device node not found\n");
                return ERR_PTR(-EINVAL);
        }

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata) {
                dev_err(&pdev->dev, "could not allocate memory for pdata\n");
                return ERR_PTR(-ENOMEM);
        }

        pdata->dma_slave = devm_kzalloc(&pdev->dev,
                                        sizeof(*(pdata->dma_slave)),
                                        GFP_KERNEL);
        if (!pdata->dma_slave) {
                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
                devm_kfree(&pdev->dev, pdata);
                return ERR_PTR(-ENOMEM);
        }

        return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
        return ERR_PTR(-EINVAL);
}
#endif

static int atmel_aes_probe(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;
        struct crypto_platform_data *pdata;
        struct device *dev = &pdev->dev;
        struct resource *aes_res;
        unsigned long aes_phys_size;
        int err;

        pdata = pdev->dev.platform_data;
        if (!pdata) {
                pdata = atmel_aes_of_init(pdev);
                if (IS_ERR(pdata)) {
                        err = PTR_ERR(pdata);
                        goto aes_dd_err;
                }
        }

        if (!pdata->dma_slave) {
                err = -ENXIO;
                goto aes_dd_err;
        }

        aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
        if (aes_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
                goto aes_dd_err;
        }

        aes_dd->dev = dev;

        platform_set_drvdata(pdev, aes_dd);

        INIT_LIST_HEAD(&aes_dd->list);
        spin_lock_init(&aes_dd->lock);

        tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
                     (unsigned long)aes_dd);
        tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
                     (unsigned long)aes_dd);

        crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

        aes_dd->irq = -1;

        /* Get the base address */
        aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!aes_res) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto res_err;
        }
        aes_dd->phys_base = aes_res->start;
        aes_phys_size = resource_size(aes_res);

        /* Get the IRQ */
        aes_dd->irq = platform_get_irq(pdev, 0);
        if (aes_dd->irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = aes_dd->irq;
                goto aes_irq_err;
        }

        err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
                          aes_dd);
        if (err) {
                dev_err(dev, "unable to request aes irq.\n");
                goto aes_irq_err;
        }

        /* Initializing the clock */
        aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
        if (IS_ERR(aes_dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(aes_dd->iclk);
                goto clk_err;
        }

        aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
        if (!aes_dd->io_base) {
                dev_err(dev, "can't ioremap\n");
                err = -ENOMEM;
                goto aes_io_err;
        }

        atmel_aes_hw_version_init(aes_dd);

        atmel_aes_get_cap(aes_dd);

        err = atmel_aes_buff_init(aes_dd);
        if (err)
                goto err_aes_buff;

        err = atmel_aes_dma_init(aes_dd, pdata);
        if (err)
                goto err_aes_dma;

        spin_lock(&atmel_aes.lock);
        list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
        spin_unlock(&atmel_aes.lock);

        err = atmel_aes_register_algs(aes_dd);
        if (err)
                goto err_algs;

        dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
                 dma_chan_name(aes_dd->dma_lch_in.chan),
                 dma_chan_name(aes_dd->dma_lch_out.chan));

        return 0;

err_algs:
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);
        atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
        atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
        iounmap(aes_dd->io_base);
aes_io_err:
        clk_put(aes_dd->iclk);
clk_err:
        free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
        kfree(aes_dd);
        aes_dd = NULL;
aes_dd_err:
        dev_err(dev, "initialization failed.\n");

        return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;

        aes_dd = platform_get_drvdata(pdev);
        if (!aes_dd)
                return -ENODEV;
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);

        atmel_aes_unregister_algs(aes_dd);

        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);

        atmel_aes_dma_cleanup(aes_dd);

        iounmap(aes_dd->io_base);

        clk_put(aes_dd->iclk);

        if (aes_dd->irq > 0)
                free_irq(aes_dd->irq, aes_dd);

        kfree(aes_dd);
        aes_dd = NULL;

        return 0;
}

static struct platform_driver atmel_aes_driver = {
        .probe          = atmel_aes_probe,
        .remove         = atmel_aes_remove,
        .driver         = {
                .name   = "atmel_aes",
                .of_match_table = of_match_ptr(atmel_aes_dt_ids),
        },
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");