/*
 * drivers/crypto/tegra-aes.c
 *
 * Driver for the NVIDIA Tegra AES hardware engine residing inside the
 * Bit Stream Engine for Video (BSEV) hardware block.
 *
 * The engine is programmed by issuing commands through a command queue
 * shared between the CPU and the BSEV block. The BSEV engine has an
 * internal RAM (VRAM) to which the input plaintext, keys and the IV must
 * be copied before starting an encrypt/decrypt operation.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/internal/rng.h>

#include "tegra-aes.h"

#define FLAGS_MODE_MASK		0x00FF
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_RNG		BIT(3)
#define FLAGS_OFB		BIT(4)
#define FLAGS_NEW_KEY		BIT(5)
#define FLAGS_NEW_IV		BIT(6)
#define FLAGS_INIT		BIT(7)
#define FLAGS_FAST		BIT(8)

/*
 * Maximum number of bytes the AES engine processes in one go, sized so that
 * one transfer takes about 1 msec. The engine spends roughly 176 cycles per
 * 16-byte block, i.e. 11 cycles/byte. If the CPU lets the BSE run for 1 msec,
 * about 216K AVP/BSE cycles are available, so the engine can process about
 * 216K / 11 ~= 19KB in that window. AES_HW_DMA_BUFFER_SIZE_BYTES is therefore
 * set to 16KB.
 */
#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000

/*
 * The key table length is 64 bytes
 * (a key of up to 32 bytes, followed by the 16-byte original initial vector
 * and the 16-byte updated initial vector).
 */
#define AES_HW_KEY_TABLE_LENGTH_BYTES 64

/*
 * The memory being used is divided as follows:
 * 1. Key - 32 bytes
 * 2. Original IV - 16 bytes
 * 3. Updated IV - 16 bytes
 * 4. Key schedule - 256 bytes
 *
 * 1+2+3 constitute the hw key table.
 */
#define AES_HW_IV_SIZE 16
#define AES_HW_KEYSCHEDULE_LEN 256
#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)

/* Define commands required for AES operation */
enum {
	CMD_BLKSTARTENGINE = 0x0E,
	CMD_DMACOMPLETE = 0x11,
};

/* Define sub-commands */
enum {
	SUBCMD_VRAM_SEL = 0x1,
	SUBCMD_CRYPTO_TABLE_SEL = 0x3,
	SUBCMD_KEY_TABLE_SEL = 0x8,
};

/* memdma_vd command */
#define MEMDMA_DIR_DTOVRAM	0 /* sdram -> vram */
#define MEMDMA_DIR_VTODRAM	1 /* vram -> sdram */
#define MEMDMA_DIR_SHIFT	25
#define MEMDMA_NUM_WORDS_SHIFT	12

/* command queue bit shifts */
enum {
	CMDQ_KEYTABLEADDR_SHIFT = 0,
	CMDQ_KEYTABLEID_SHIFT = 17,
	CMDQ_VRAMSEL_SHIFT = 23,
	CMDQ_TABLESEL_SHIFT = 24,
	CMDQ_OPCODE_SHIFT = 26,
};

/*
 * The secure key slot contains a unique secure key generated
 * and loaded by the bootloader. This slot is marked as non-accessible
 * to the kernel.
 */
#define SSK_SLOT_NUM 4

#define AES_NR_KEYSLOTS 8
#define TEGRA_AES_QUEUE_LENGTH 50
#define DEFAULT_RNG_BLK_SZ 16

/* The command queue depth */
#define AES_HW_MAX_ICQ_LENGTH 5

struct tegra_aes_slot {
	struct list_head node;
	int slot_num;
};

static struct tegra_aes_slot ssk = {
	.slot_num = SSK_SLOT_NUM,
};

struct tegra_aes_reqctx {
	unsigned long mode;
};

struct tegra_aes_dev {
	struct device *dev;
	void __iomem *io_base;
	dma_addr_t ivkey_phys_base;
	void __iomem *ivkey_base;
	struct clk *aes_clk;
	struct tegra_aes_ctx *ctx;
	int irq;
	unsigned long flags;
	struct completion op_complete;
	u32 *buf_in;
	dma_addr_t dma_buf_in;
	u32 *buf_out;
	dma_addr_t dma_buf_out;
	u8 *iv;
	u8 dt[DEFAULT_RNG_BLK_SZ];
	int ivlen;
	u64 ctr;
	spinlock_t lock;
	struct crypto_queue queue;
	struct tegra_aes_slot *slots;
	struct ablkcipher_request *req;
	size_t total;
	struct scatterlist *in_sg;
	struct scatterlist *out_sg;
};

static struct tegra_aes_dev *aes_dev;

struct tegra_aes_ctx {
	struct tegra_aes_dev *dd;
	unsigned long flags;
	struct tegra_aes_slot *slot;
	u8 key[AES_MAX_KEY_SIZE];
	int keylen;
};

static struct tegra_aes_ctx rng_ctx = {
	.flags = FLAGS_NEW_KEY,
	.keylen = AES_KEYSIZE_128,
};

/* keep registered devices data here */
static struct list_head dev_list;
static DEFINE_SPINLOCK(list_lock);
static DEFINE_MUTEX(aes_lock);

static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;

extern unsigned long long tegra_chip_uid(void);

static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
	return readl(dd->io_base + offset);
}

static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
{
	writel(val, dd->io_base + offset);
}

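/*
 * Program one crypto operation: queue the DMA setup / engine start / DMA
 * complete commands into the internal command queue (ICQ), route source and
 * destination through AHB/SDRAM, select the cipher mode bits in the secure
 * input register and wait for the transfer-complete interrupt.
 */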
static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
			   int nblocks, int mode, bool upd_iv)
{
	u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
	int i, eng_busy, icq_empty, ret;
	u32 value;

	/* reset all the interrupt bits */
	aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);

	/* enable error, dma xfer complete interrupts */
	aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);

	cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
	cmdq[1] = in_addr;
	cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1);
	cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;

	value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
	/* access SDRAM through AHB */
	value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
	value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
	value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
		 TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
	aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
	dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);

	value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
		((dd->ctx->keylen * 8) <<
			TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
		((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);

	if (mode & FLAGS_CBC) {
		value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_XOR_POS_SHIFT) |
			(((mode & FLAGS_ENCRYPT) ? 2 : 3)
				<< TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
			((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	} else if (mode & FLAGS_OFB) {
		value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
			(2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
			(TEGRA_AES_SECURE_CORE_SEL_FIELD));
	} else if (mode & FLAGS_RNG) {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT |
			TEGRA_AES_SECURE_RNG_ENB_FIELD);
	} else {
		value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
				<< TEGRA_AES_SECURE_CORE_SEL_SHIFT);
	}

	dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
	aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);

	aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
	INIT_COMPLETION(dd->op_complete);

	for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
		do {
			value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
			eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
			icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		} while (eng_busy & (!icq_empty));
		aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
	}

	ret = wait_for_completion_timeout(&dd->op_complete,
					  msecs_to_jiffies(150));
	if (ret == 0) {
		dev_err(dd->dev, "timed out (0x%x)\n",
			aes_readl(dd, TEGRA_AES_INTR_STATUS));
		return -ETIMEDOUT;
	}

	aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR);
	return 0;
}

static void aes_release_key_slot(struct tegra_aes_slot *slot)
{
	if (slot->slot_num == SSK_SLOT_NUM)
		return;

	spin_lock(&list_lock);
	list_add_tail(&slot->node, &dev_list);
	spin_unlock(&list_lock);
}

static struct tegra_aes_slot *aes_find_key_slot(void)
{
	struct tegra_aes_slot *slot = NULL;
	struct list_head *new_head;
	int empty;

	spin_lock(&list_lock);
	empty = list_empty(&dev_list);
	if (!empty) {
		slot = list_entry(&dev_list, struct tegra_aes_slot, node);
		new_head = dev_list.next;
		dev_list.next = new_head->next;
		dev_list.prev = NULL;
	}
	spin_unlock(&list_lock);

	return slot;
}

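/*
 * Load the current key into the selected hardware key slot: enable key
 * schedule generation, select the slot in the secure config register, DMA
 * the key/IV table from SDRAM into VRAM and issue the SETTABLE command so
 * the engine latches it into its internal key registers.
 */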
static int aes_set_key(struct tegra_aes_dev *dd)
{
	u32 value, cmdq[2];
	struct tegra_aes_ctx *ctx = dd->ctx;
	int eng_busy, icq_empty, dma_busy;
	bool use_ssk = false;

	/* fall back to the pre-loaded secure key slot if no key is set */
	if (!dd->ctx->slot) {
		dev_dbg(dd->dev, "using ssk");
		dd->ctx->slot = &ssk;
		use_ssk = true;
	}

	/* enable key schedule generation in hardware */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
	value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);

	/* select the key slot */
	value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
	value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
	value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
	aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);

	if (use_ssk)
		return 0;

	/* copy the key table from sdram to vram */
	cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
		MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
		AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
			MEMDMA_NUM_WORDS_SHIFT;
	cmdq[1] = (u32)dd->ivkey_phys_base;

	aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
	aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
		dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
	} while (eng_busy & (!icq_empty) & dma_busy);

	/* settable command to get key into internal registers */
	value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
		SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
		SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
		(SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
			CMDQ_KEYTABLEID_SHIFT;
	aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);

	do {
		value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
		eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
		icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
	} while (eng_busy & (!icq_empty));

	return 0;
}

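/*
 * Dequeue one ablkcipher request, program the key/IV if they changed, then
 * walk the source/destination scatterlists mapping one entry at a time and
 * feeding up to AES_HW_DMA_BUFFER_SIZE_BYTES per pass to aes_start_crypt().
 */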
static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;
	int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
	int ret = 0, nblocks, total;
	int count = 0;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return -ENODATA;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	dev_dbg(dd->dev, "%s: get new req\n", __func__);

	if (!req->src || !req->dst)
		return -EINVAL;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	in_sg = dd->in_sg;
	out_sg = dd->out_sg;
	total = dd->total;

	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->iv = (u8 *)req->info;
	dd->ivlen = crypto_ablkcipher_ivsize(tfm);

	/* assign new context to device */
	dd->ctx = ctx;

	if (ctx->flags & FLAGS_NEW_KEY) {
		/* copy the key into the key/IV table and program it */
		memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
		memset(dd->ivkey_base + ctx->keylen, 0,
		       AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
		aes_set_key(dd);
		ctx->flags &= ~FLAGS_NEW_KEY;
	}

	if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
		/* set iv to the aes hw slot
		 * Hw generates the updated iv only after the iv is set in the
		 * slot, so the key and iv are passed asynchronously.
		 */
		memcpy(dd->buf_in, dd->iv, dd->ivlen);

		ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
				      dd->dma_buf_out, 1, FLAGS_CBC, false);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
	}

	while (total) {
		dev_dbg(dd->dev, "remain: %d\n", total);
		ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			goto out;
		}

		ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		if (!ret) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg,
				     1, DMA_TO_DEVICE);
			goto out;
		}

		addr_in = sg_dma_address(in_sg);
		addr_out = sg_dma_address(out_sg);
		dd->flags |= FLAGS_FAST;
		count = min_t(int, sg_dma_len(in_sg), dma_max);
		WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
		nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);

		ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
				      dd->flags, true);

		dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
		if (ret < 0) {
			dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
			goto out;
		}
		dd->flags &= ~FLAGS_FAST;

		dev_dbg(dd->dev, "out: copied %d\n", count);
		total -= count;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
		WARN_ON(((total != 0) && (!in_sg || !out_sg)));
	}

out:
	mutex_unlock(&aes_lock);

	if (dd->req->base.complete)
		dd->req->base.complete(&dd->req->base, ret);

	dev_dbg(dd->dev, "%s: exit\n", __func__);
	return ret;
}

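/*
 * Cache the key in the transform context; it is loaded into the hardware
 * key slot when the next request is handled.
 */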
static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_slot *key_slot;

	if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
	    (keylen != AES_KEYSIZE_256)) {
		dev_err(dd->dev, "unsupported key size\n");
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	dev_dbg(dd->dev, "keylen: %d\n", keylen);

	ctx->dd = dd;

	if (key) {
		if (!ctx->slot) {
			key_slot = aes_find_key_slot();
			if (!key_slot) {
				dev_err(dd->dev, "no empty slot\n");
				return -ENOMEM;
			}

			ctx->slot = key_slot;
		}

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	}

	ctx->flags |= FLAGS_NEW_KEY;
	dev_dbg(dd->dev, "done\n");
	return 0;
}

static void aes_workqueue_handler(struct work_struct *work)
{
	struct tegra_aes_dev *dd = aes_dev;
	int ret;

	ret = clk_prepare_enable(dd->aes_clk);
	if (ret)
		BUG_ON("clock enable failed");

	/* empty the crypto queue and then return */
	do {
		ret = tegra_aes_handle_req(dd);
	} while (!ret);

	clk_disable_unprepare(dd->aes_clk);
}

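/*
 * IRQ handler: acknowledge error interrupts and complete the pending
 * operation once the engine reports idle.
 */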
static irqreturn_t aes_irq(int irq, void *dev_id)
{
	struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
	u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
	int busy = test_bit(FLAGS_BUSY, &dd->flags);

	if (!busy) {
		dev_dbg(dd->dev, "spurious interrupt\n");
		return IRQ_NONE;
	}

	dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
	if (value & TEGRA_AES_INT_ERROR_MASK)
		aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);

	if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
		complete(&dd->op_complete);

	return IRQ_HANDLED;
}

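/* Enqueue a request and kick the worker if the engine is currently idle. */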
static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct tegra_aes_dev *dd = aes_dev;
	unsigned long flags;
	int err = 0;
	int busy;

	dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT),
		!!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));

	rctx->mode = mode;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!busy)
		queue_work(aes_wq, &aes_work);

	return err;
}

static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT);
}

static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, 0);
}

static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_CBC);
}

static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
}

static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return tegra_aes_crypt(req, FLAGS_OFB);
}

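/*
 * Pseudo-random number generation: encrypt the current DT vector with the
 * seeded key, hand the result back to the caller and step DT for the next
 * call.
 */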
static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
				unsigned int dlen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	int ret, i;
	u8 *dest = rdata, *dt = dd->dt;

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	ret = clk_prepare_enable(dd->aes_clk);

	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
	dd->ctx = ctx;

	memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);

	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      (u32)dd->dma_buf_out, 1, dd->flags, true);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		dlen = ret;
		goto out;
	}
	memcpy(dest, dd->buf_out, dlen);

	/* update the DT */
	for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
		dt[i] += 1;
		if (dt[i] != 0)
			break;
	}

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return dlen;
}

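/*
 * Seed the RNG: the seed carries the initial DT vector followed by a 128-bit
 * key (and optionally a caller-provided DT); without the optional DT it is
 * derived from the current time and the chip UID.
 */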
static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
			       unsigned int slen)
{
	struct tegra_aes_dev *dd = aes_dev;
	struct tegra_aes_ctx *ctx = &rng_ctx;
	struct tegra_aes_slot *key_slot;
	struct timespec ts;
	u64 nsec, tmp[2];
	int ret = 0;
	u8 *dt;

	if (!ctx || !dd) {
		dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
			(unsigned int)ctx, (unsigned int)dd);
		return -EINVAL;
	}

	if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dev_err(dd->dev, "seed size invalid");
		return -EINVAL;
	}

	/* take mutex to access the aes hw */
	mutex_lock(&aes_lock);

	if (!ctx->slot) {
		key_slot = aes_find_key_slot();
		if (!key_slot) {
			dev_err(dd->dev, "no empty slot\n");
			mutex_unlock(&aes_lock);
			return -ENOMEM;
		}
		ctx->slot = key_slot;
	}

	ctx->dd = dd;
	dd->ctx = ctx;
	ctx->keylen = AES_KEYSIZE_128;
	ctx->flags |= FLAGS_NEW_KEY;

	/* copy the key to the key slot */
	memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
	memset(dd->ivkey_base + AES_KEYSIZE_128, 0,
	       AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);

	dd->iv = seed;
	dd->ivlen = slen;

	dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

	ret = clk_prepare_enable(dd->aes_clk);

	/* set seed to the aes hw slot */
	memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
	ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
			      dd->dma_buf_out, 1, FLAGS_CBC, false);
	if (ret < 0) {
		dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
		goto out;
	}

	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
	} else {
		getnstimeofday(&ts);
		nsec = timespec_to_ns(&ts);
		nsec ^= dd->ctr << 56;
		tmp[0] = nsec;
		tmp[1] = tegra_chip_uid();
		dt = (u8 *)tmp;
	}
	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);

out:
	clk_disable_unprepare(dd->aes_clk);
	mutex_unlock(&aes_lock);

	dev_dbg(dd->dev, "%s: done\n", __func__);
	return ret;
}

static int tegra_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);

	return 0;
}

void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_aes_ctx *ctx =
		crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);

	if (ctx && ctx->slot)
		aes_release_key_slot(ctx->slot);
}

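/*
 * ecb/cbc/ofb ablkcipher algorithms plus an "ansi_cprng" RNG, all backed by
 * the BSEV engine.
 */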
static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ecb_encrypt,
			.decrypt = tegra_aes_ecb_decrypt,
		},
	}, {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_cbc_encrypt,
			.decrypt = tegra_aes_cbc_decrypt,
		},
	}, {
		.cra_name = "ofb(aes)",
		.cra_driver_name = "ofb-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_ofb_encrypt,
			.decrypt = tegra_aes_ofb_decrypt,
		},
	}, {
		.cra_name = "ansi_cprng",
		.cra_driver_name = "rng-aes-tegra",
		.cra_flags = CRYPTO_ALG_TYPE_RNG,
		.cra_ctxsize = sizeof(struct tegra_aes_ctx),
		.cra_type = &crypto_rng_type,
		.cra_u.rng = {
			.rng_make_random = tegra_aes_get_random,
			.rng_reset = tegra_aes_rng_reset,
			.seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
		},
	}
};

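/*
 * Probe: map the BSEV registers, grab the "vde" clock, allocate the DMA
 * buffers and the key/IV table, set up the key-slot free list and register
 * the crypto algorithms.
 */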
static int tegra_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i = 0, j;

	dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		return err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
				 AES_NR_KEYSLOTS, GFP_KERNEL);
	if (dd->slots == NULL) {
		dev_err(dev, "unable to alloc slot struct.\n");
		goto out;
	}

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);

	/* Get the module base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: base\n");
		err = -ENODEV;
		goto out;
	}

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res),
				     dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "Couldn't request MEM resource\n");
		err = -EBUSY;
		goto out;
	}

	dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap register space\n");
		err = -ENOMEM;
		goto out;
	}

	/* Initialize the vde clock */
	dd->aes_clk = clk_get(dev, "vde");
	if (IS_ERR(dd->aes_clk)) {
		dev_err(dev, "iclock initialization failed.\n");
		err = -ENODEV;
		goto out;
	}

	err = clk_set_rate(dd->aes_clk, ULONG_MAX);
	if (err) {
		dev_err(dd->dev, "iclk set_rate fail(%d)\n", err);
		goto out;
	}

	/*
	 * the following contiguous memory is allocated as follows -
	 * - hardware key table
	 */
	dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
					    &dd->ivkey_phys_base, GFP_KERNEL);
	if (!dd->ivkey_base) {
		dev_err(dev, "can not allocate iv/key buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					&dd->dma_buf_in, GFP_KERNEL);
	if (!dd->buf_in) {
		dev_err(dev, "can not allocate dma-in buffer\n");
		err = -ENOMEM;
		goto out;
	}

	dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
					 &dd->dma_buf_out, GFP_KERNEL);
	if (!dd->buf_out) {
		dev_err(dev, "can not allocate dma-out buffer\n");
		err = -ENOMEM;
		goto out;
	}

	init_completion(&dd->op_complete);
	aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!aes_wq) {
		dev_err(dev, "alloc_workqueue failed\n");
		err = -ENOMEM;
		goto out;
	}

	/* get the irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "invalid resource type: irq\n");
		err = -ENODEV;
		goto out;
	}
	dd->irq = res->start;

	err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
			       IRQF_SHARED, "tegra-aes", dd);
	if (err) {
		dev_err(dev, "request_irq failed\n");
		goto out;
	}

	mutex_init(&aes_lock);
	INIT_LIST_HEAD(&dev_list);

	spin_lock_init(&list_lock);
	spin_lock(&list_lock);
	for (i = 0; i < AES_NR_KEYSLOTS; i++) {
		if (i == SSK_SLOT_NUM)
			continue;
		dd->slots[i].slot_num = i;
		INIT_LIST_HEAD(&dd->slots[i].node);
		list_add_tail(&dd->slots[i].node, &dev_list);
	}
	spin_unlock(&list_lock);

	aes_dev = dd;
	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		INIT_LIST_HEAD(&algs[i].cra_list);

		algs[i].cra_priority = 300;
		algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
		algs[i].cra_module = THIS_MODULE;
		algs[i].cra_init = tegra_aes_cra_init;
		algs[i].cra_exit = tegra_aes_cra_exit;

		err = crypto_register_alg(&algs[i]);
		if (err)
			goto out;
	}

	dev_info(dev, "registered");
	return 0;

out:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	if (dd->ivkey_base)
		dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
				  dd->ivkey_base, dd->ivkey_phys_base);
	if (dd->buf_in)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_in, dd->dma_buf_in);
	if (dd->buf_out)
		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
				  dd->buf_out, dd->dma_buf_out);
	if (!IS_ERR_OR_NULL(dd->aes_clk))
		clk_put(dd->aes_clk);
	if (aes_wq)
		destroy_workqueue(aes_wq);
	spin_lock(&list_lock);
	list_del(&dev_list);
	spin_unlock(&list_lock);

	dev_err(dev, "%s: initialization failed.\n", __func__);
	return err;
}

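/* Remove: unregister the algorithms and release all engine resources. */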
static int __devexit tegra_aes_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	cancel_work_sync(&aes_work);
	destroy_workqueue(aes_wq);
	spin_lock(&list_lock);
	list_del(&dev_list);
	spin_unlock(&list_lock);

	dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
			  dd->ivkey_base, dd->ivkey_phys_base);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_in, dd->dma_buf_in);
	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
			  dd->buf_out, dd->dma_buf_out);
	clk_put(dd->aes_clk);

	return 0;
}

static struct of_device_id tegra_aes_of_match[] __devinitdata = {
	{ .compatible = "nvidia,tegra20-aes", },
	{ .compatible = "nvidia,tegra30-aes", },
	{ },
};

static struct platform_driver tegra_aes_driver = {
	.probe  = tegra_aes_probe,
	.remove = __devexit_p(tegra_aes_remove),
	.driver = {
		.name   = "tegra-aes",
		.owner  = THIS_MODULE,
		.of_match_table = tegra_aes_of_match,
	},
};

module_platform_driver(tegra_aes_driver);

MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL v2");