/*
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))
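/*
 * Note: with DST_MAXBURST of 4 words, DMA_MIN works out to 16 bytes, i.e.
 * exactly one AES block (AES_BLOCK_SIZE); the slow path below rounds DMA
 * lengths up to this granularity.
 */
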
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
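/*
 * Worked example: FLD_MASK(4, 3) == 0x18, so the key-size field written in
 * omap_aes_write_ctrl() below, FLD_VAL((keylen >> 3) - 1, 4, 3), encodes
 * AES-128/192/256 (keylen 16/24/32) as 0x08/0x10/0x18 respectively.
 */
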
#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))

#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	(3 << 7)
#define AES_REG_CTRL_CTR_WIDTH_32	(0 << 7)
#define AES_REG_CTRL_CTR_WIDTH_64	(1 << 7)
#define AES_REG_CTRL_CTR_WIDTH_96	(2 << 7)
#define AES_REG_CTRL_CTR_WIDTH_128	(3 << 7)
#define AES_REG_CTRL_CTR		(1 << 6)
#define AES_REG_CTRL_CBC		(1 << 5)
#define AES_REG_CTRL_KEY_SIZE		(3 << 3)
#define AES_REG_CTRL_DIRECTION		(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		(1 << 6)
#define AES_REG_MASK_START		(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN		(1 << 3)
#define AES_REG_MASK_DMA_IN_EN		(1 << 2)
#define AES_REG_MASK_SOFTRESET		(1 << 1)
#define AES_REG_AUTOIDLE		(1 << 0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define DEFAULT_TIMEOUT		(5 * HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_CTR		BIT(3)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)
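/*
 * The low nibble of dd->flags mirrors the per-request mode (see
 * FLAGS_MODE_MASK): omap_aes_handle_queue() copies rctx->mode into it before
 * programming the hardware, while the INIT/FAST/BUSY bits track device state
 * across requests.
 */
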
struct omap_aes_ctx {
	struct omap_aes_dev	*dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
};

struct omap_aes_reqctx {
	unsigned long	mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int	algs_info_size;

	void		(*trigger)(struct omap_aes_dev *dd, int length);

	u32		key_ofs;
	u32		iv_ofs;
	u32		ctrl_ofs;
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;
	u32		dma_start;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	struct scatterlist		in_sgl;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	struct scatterlist		out_sgl;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;

	const struct omap_aes_pdata	*pdata;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	/* read-modify-write: only the bits selected by mask are updated */
	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val, mask = 0;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			__le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR) {
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
		mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
	}
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
			AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);

	return 0;
}
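/*
 * Worked example for the CTRL value computed above: an AES-128 CBC encrypt
 * request ends up with val = FLD_VAL(1, 4, 3) | AES_REG_CTRL_CBC |
 * AES_REG_CTRL_DIRECTION = 0x08 | 0x20 | 0x04 = 0x2c, and the masked write
 * leaves the CTR width bits untouched for non-CTR modes.
 */
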
static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* map the bounce buffers once here, reuse them for every request */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE size aligned,
		 * so count should be also aligned
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
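/*
 * sg_copy() feeds the bounce buffers used by the slow path: it walks the
 * request scatterlist from *offset, copies up to buflen bytes (bounded by the
 * remaining total) into or out of buf, advances *sg/*offset past fully
 * consumed entries and returns the number of bytes actually copied.
 */
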
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, length);

	return 0;
}
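/*
 * The sequence above follows the usual dmaengine slave flow for both
 * channels: dmaengine_slave_config() -> dmaengine_prep_slave_sg() ->
 * dmaengine_submit() -> dma_async_issue_pending(); only then is the AES
 * module told to start pushing/pulling data via pdata->trigger().
 */
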
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;

	pr_debug("total: %d\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;
	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				 dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;
	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}
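/*
 * Two paths are possible above: the "fast" path DMAs straight out of the
 * caller's single, word-aligned scatterlist entries, while the fallback path
 * stages data through the pre-mapped buf_in/buf_out bounce buffers and local
 * one-entry scatterlists, rounding the DMA length up to DMA_MIN.
 */
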
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %d\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data out of the bounce buffer */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				 dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %u\n", count);
		}
	}

	return err;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}
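/*
 * Backlogged requests are reported back to their submitter with -EINPROGRESS
 * once they move from the backlog into active processing; the caller of
 * omap_aes_crypt() only ever sees the enqueue return value (ret) from this
 * function.
 */
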
static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	omap_aes_finish_req(dd, err);
	omap_aes_handle_queue(dd, NULL);
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		  !!(mode & FLAGS_ENCRYPT),
		  !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);
	pm_runtime_get_sync(dd->dev);

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);
	pm_runtime_put_sync(dd->dev);
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
},
};

static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.minor_mask	= 0x003f,
};

#if defined(CONFIG_OF)
static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);
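/*
 * For reference, a board's device tree would bind to one of the compatible
 * strings above. A minimal sketch only; the unit address, reg size, interrupt
 * and DMA specifiers below are placeholders, not taken from any real SoC:
 *
 *	aes: aes@0 {
 *		compatible = "ti,omap4-aes";
 *		reg = <0x0 0xa0>;
 *		interrupts = <0>;
 *		dmas = <&sdma 1>, <&sdma 2>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * The "tx"/"rx" names match the channel names requested in
 * omap_aes_dma_init().
 */
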
static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		return -EINVAL;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1; /* Dummy value that's unused */

	dd->pdata = match->data;

	return 0;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		return -ENODEV;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		return -ENODEV;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		return -ENODEV;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

	return 0;
}

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			algp = &dd->pdata->algs_info[i].algs_list[j];

			pr_debug("reg alg: %s\n", algp->cra_name);
			INIT_LIST_HEAD(&algp->cra_list);

			err = crypto_register_alg(algp);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif
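/*
 * The suspend/resume pair above only rebalances the runtime PM reference
 * held while a transform is allocated (taken in omap_aes_cra_init()), so the
 * module can idle across system sleep and is powered back up on wake.
 */
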
static const struct dev_pm_ops omap_aes_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");