/*
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/version.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <plat/cpu.h>
#include <plat/dma.h>
#include <mach/irqs.h>
#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define FLAGS_FIRST		0x0001
#define FLAGS_FINUP		0x0002
#define FLAGS_FINAL		0x0004
#define FLAGS_FAST		0x0008
#define FLAGS_SHA1		0x0010
#define FLAGS_DMA_ACTIVE	0x0020
#define FLAGS_OUTPUT_READY	0x0040
#define FLAGS_CLEAN		0x0080
#define FLAGS_INIT		0x0100
#define FLAGS_CPU		0x0200
#define FLAGS_HMAC		0x0400

/* 3rd byte */
#define FLAGS_BUSY		16

#define OP_UPDATE		1
#define OP_FINAL		2
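/*
 * Note on flag ownership: FLAGS_FIRST..FLAGS_HMAC track per-request (or, for
 * FLAGS_HMAC, also per-tfm) state, while FLAGS_INIT, FLAGS_DMA_ACTIVE and
 * FLAGS_BUSY live in dd->flags and track device state. FLAGS_BUSY is used
 * with test_and_set_bit()/clear_bit(), hence a bit number rather than a mask.
 */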
struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	size_t			digcnt;
	u8			*buffer;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */
};
struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};
struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1
struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	struct clk		*iclk;
	spinlock_t		lock;
	int			dma;
	int			dma_lch;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};
struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
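/*
 * Every probed accelerator instance registers itself on sham.dev_list;
 * omap_sham_init() below picks the first entry and caches it in the tfm
 * context, so this singleton acts as the driver-wide device registry.
 */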
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}
static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)req->result;
	int i;

	if (likely(ctx->flags & FLAGS_SHA1)) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			if (out)
				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
							SHA_REG_DIGEST(i)));
			else
				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
						cpu_to_be32(hash[i]));
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			if (out)
				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
							SHA_REG_DIGEST(i)));
			else
				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
						cpu_to_le32(hash[i]));
	}
}
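/*
 * For example, a SHA1 digest is read back by fetching DIGEST(0)..DIGEST(4)
 * and converting each word from the device's big-endian layout, while MD5
 * words are little-endian. With out == 0 the same walk runs in reverse to
 * reload a saved intermediate digest when interleaved requests share the
 * engine (see omap_sham_handle_queue()).
 */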
static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (unlikely(!ctx->digcnt)) {

		clk_enable(dd->iclk);

		if (!(dd->flags & FLAGS_INIT)) {
			omap_sham_write_mask(dd, SHA_REG_MASK,
				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
						SHA_REG_SYSSTATUS_RESETDONE))
				return -ETIMEDOUT;

			dd->flags |= FLAGS_INIT;
		}
	} else {
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
	}

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & FLAGS_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);

	return 0;
}
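/*
 * CTRL layout, matching the SHA_REG_CTRL_* masks above: bits 31..5 carry
 * the byte count of this transfer (hence "length << 5"), bit 4 closes the
 * hash, bit 3 loads the initial constants (first block only) and bit 2
 * selects SHA1 over MD5. On continuations DIGCNT is rewritten first so the
 * hardware can pad the final block with the correct total length.
 */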
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int err, count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	err = omap_sham_write_ctrl(dd, length, final, 0);
	if (err)
		return err;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	ctx->digcnt += length;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int err, len32;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	/* flush cache entries related to our page */
	if (dma_addr == ctx->dma_addr)
		dma_sync_single_for_device(dd->dev, dma_addr, length,
					   DMA_TO_DEVICE);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

	err = omap_sham_write_ctrl(dd, length, final, 1);
	if (err)
		return err;

	ctx->digcnt += length;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= FLAGS_DMA_ACTIVE;

	omap_start_dma(dd->dma_lch);

	return -EINPROGRESS;
}
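/*
 * Both xmit paths program the same CTRL word and return -EINPROGRESS; they
 * differ only in how data reaches the DIN FIFO. xmit_cpu() polls
 * INPUT_READY and stores 32-bit words by PIO, while xmit_dma() hands the
 * buffer to the system DMA channel set up in omap_sham_dma_init() and lets
 * the DMA completion callback drive the rest.
 */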
static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
	}

	return 0;
}
static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length;

	ctx->flags |= FLAGS_FAST;

	length = min(ctx->total, sg_dma_len(ctx->sg));
	ctx->total = length;

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->total -= length;

	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
}
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);
	if (ctx->flags & FLAGS_FAST)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return 0;
}
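/*
 * Update strategy: the "slow" path accumulates request data in the
 * page-sized bounce buffer and starts a DMA transfer once the buffer is
 * full (or the request is final); the "fast" path maps a single, suitably
 * aligned scatterlist entry and DMAs straight from it, skipping the copy.
 * Small totals (<= 64 bytes) are pushed through the CPU path instead.
 */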
static void omap_sham_cleanup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	unsigned long flags;

	spin_lock_irqsave(&dd->lock, flags);
	if (ctx->flags & FLAGS_CLEAN) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return;
	}
	ctx->flags |= FLAGS_CLEAN;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (ctx->digcnt)
		clk_disable(dd->iclk);

	if (ctx->dma_addr)
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);

	if (ctx->buffer)
		free_page((unsigned long)ctx->buffer);

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
}
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	ctx->flags |= FLAGS_FIRST;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= FLAGS_SHA1;

	ctx->bufcnt = 0;
	ctx->digcnt = 0;

	ctx->buflen = PAGE_SIZE;
	ctx->buffer = (void *)__get_free_page(
				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
				GFP_KERNEL : GFP_ATOMIC);
	if (!ctx->buffer)
		return -ENOMEM;

	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		free_page((unsigned long)ctx->buffer);
		return -EINVAL;
	}

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= FLAGS_HMAC;
	}

	return 0;
}
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);

	if (ctx->flags & FLAGS_CPU)
		err = omap_sham_update_cpu(dd);
	else if (ctx->flags & FLAGS_FAST)
		err = omap_sham_update_dma_fast(dd);
	else
		err = omap_sham_update_dma_slow(dd);

	/* wait for DMA completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	if (err != -EINPROGRESS)
		omap_sham_cleanup(req);

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static int omap_sham_finish_req_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}
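/*
 * The engine only computes the inner hash, because omap_sham_init()
 * pre-loads the buffer with ipad. This helper applies the outer step of
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * in software: one block-sized update with the precomputed opad, then a
 * finup over the ds-byte inner digest already sitting in req->result.
 */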
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!err) {
		omap_sham_copy_hash(ctx->dd->req, 1);
		if (ctx->flags & FLAGS_HMAC)
			err = omap_sham_finish_req_hmac(req);
	}

	if (ctx->flags & FLAGS_FINAL)
		omap_sham_cleanup(req);

	clear_bit(FLAGS_BUSY, &ctx->dd->flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	struct ahash_request *req, *prev_req;
	unsigned long flags;
	int err = 0;

	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
		return 0;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return 0;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);

	prev_req = dd->req;
	dd->req = req;

	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	if (req != prev_req && ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}

	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		tasklet_schedule(&dd->queue_task);
	}

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return err;
}
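/*
 * Single-slot dispatcher: FLAGS_BUSY keeps one request on the engine at a
 * time and dd->queue backlogs at most OMAP_SHAM_QUEUE_LENGTH more. When a
 * different partially-hashed request gets the engine, its intermediate
 * digest is restored into the DIGEST registers (omap_sham_copy_hash with
 * out == 0) before any new data is fed in.
 */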
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;
	unsigned long flags;
	int err;

	ctx->op = op;

	spin_lock_irqsave(&dd->lock, flags);
	err = ahash_enqueue_request(&dd->queue, req);
	spin_unlock_irqrestore(&dd->lock, flags);

	omap_sham_handle_queue(dd);

	return err;
}
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & FLAGS_FINUP) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with buffers >= 9 bytes;
			 * will switch to bypass in final().
			 * final() has the same request and data.
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= 64) {
			ctx->flags |= FLAGS_CPU;
		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
			/* maybe we can use faster functions */
			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
							sizeof(u32));

			if (aligned && (ctx->flags & FLAGS_FIRST))
				/* digest: first and final */
				ctx->flags |= FLAGS_FAST;

			ctx->flags &= ~FLAGS_FIRST;
		}
	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
		/* if not finup -> not fast */
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}
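/*
 * The "< 9" tests above and in omap_sham_final() mirror a hardware limit:
 * the accelerator cannot close a hash over fewer than 9 bytes in total,
 * so such requests are buffered here and handed to the software fallback
 * in omap_sham_final_shash().
 */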
static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->flags |= FLAGS_FINUP;

	/* OMAP HW accel works only with buffers >= 9 bytes */
	/* HMAC is always >= 9 because of the ipad block */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		err = omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	omap_sham_cleanup(req);

	return err;
}
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= FLAGS_FINUP;

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS)
		return err1;
	/*
	 * final() has to be always called to clean up resources
	 * even if update() failed, except for -EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return 0;
}
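/*
 * Standard HMAC key schedule: a key longer than the block size is first
 * hashed down to ds bytes, then the zero-padded key is XORed with the
 * 0x36/0x5c constants so ipad and opad are precomputed once per setkey()
 * instead of once per request.
 */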
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx));

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}
static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}
static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int ready = 0;

	if (ctx->flags & FLAGS_OUTPUT_READY) {
		ctx->flags &= ~FLAGS_OUTPUT_READY;
		ready = 1;
	}

	if (dd->flags & FLAGS_DMA_ACTIVE) {
		dd->flags &= ~FLAGS_DMA_ACTIVE;
		omap_sham_update_dma_stop(dd);
		omap_sham_update_dma_slow(dd);
	}

	if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
		dev_dbg(dd->dev, "update done\n");
		/* finish current request */
		omap_sham_finish_req(req, 0);
		/* start new request */
		omap_sham_handle_queue(dd);
	}
}
static void omap_sham_queue_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;

	omap_sham_handle_queue(dd);
}
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (!ctx) {
		dev_err(dd->dev, "unknown interrupt.\n");
		return IRQ_HANDLED;
	}

	if (unlikely(ctx->flags & FLAGS_FINAL))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	ctx->flags |= FLAGS_OUTPUT_READY;
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}
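/*
 * The hard-irq handler only acknowledges OUTPUT_READY (the read-back of
 * SHA_REG_CTRL flushes the posted write) and defers copying the digest,
 * completing the request and restarting the queue to done_task.
 */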
static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (likely(lch == dd->dma_lch))
		tasklet_schedule(&dd->done_task);
}
static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		return err;
	}
	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	return 0;
}
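/*
 * The destination is programmed with OMAP_DMA_AMODE_CONSTANT because
 * SHA_REG_DIN(0) is a FIFO-style port: every element of the transfer is
 * written to the same physical address and consumed by the engine.
 */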
static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}
static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};
static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	if (!cpu_class_is_omap2() ||
		omap_type() != OMAP2_DEVICE_TYPE_SEC) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_sham_driver);
}
static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");