/*
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <plat/cpu.h>
#include <plat/dma.h>
#include <mach/irqs.h>
#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ
#define FLAGS_FIRST		0x0001
#define FLAGS_FINUP		0x0002
#define FLAGS_FINAL		0x0004
#define FLAGS_FAST		0x0008
#define FLAGS_SHA1		0x0010
#define FLAGS_DMA_ACTIVE	0x0020
#define FLAGS_OUTPUT_READY	0x0040
#define FLAGS_CLEAN		0x0080
#define FLAGS_INIT		0x0100
#define FLAGS_CPU		0x0200
#define FLAGS_HMAC		0x0400
#define FLAGS_ERROR		0x0800
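
/*
 * FLAGS_BUSY is used with test_and_set_bit()/clear_bit() below, so unlike
 * the mask-style flags above it must be a bit number; the value here is an
 * assumption, chosen not to collide with the masks above.
 */
#define FLAGS_BUSY		12

#define OP_UPDATE		1
#define OP_FINAL		2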
struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA1_DIGEST_SIZE];
	size_t			digcnt;
	u8			*buffer;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */
};
struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};
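
/*
 * Per-transform context. The trailing zero-length base[] array holds the
 * HMAC sub-context: the hmac(...) algorithms enlarge cra_ctxsize by
 * sizeof(struct omap_sham_hmac_ctx), so that state lives directly behind
 * this struct.
 */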
struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};
#define OMAP_SHAM_QUEUE_LENGTH	1
struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	struct clk		*iclk;
	spinlock_t		lock;
	int			err;
	int			dma;
	int			dma_lch;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};
struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	/* read-modify-write: update only the bits selected by mask */
	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
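
/*
 * Copy the hash between the SHA_REG_DIGEST registers and the request
 * context. out != 0 reads the digest from hardware; out == 0 restores a
 * previously saved digest into the hardware before resuming a request.
 */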
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	if (likely(ctx->flags & FLAGS_SHA1)) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			if (out)
				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
							SHA_REG_DIGEST(i)));
			else
				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
						cpu_to_be32(hash[i]));
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			if (out)
				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
							SHA_REG_DIGEST(i)));
			else
				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
						cpu_to_le32(hash[i]));
	}
}
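
/*
 * Program the control registers for one transfer: reset and clock the
 * module on first use, then set the length, the algorithm (ALGO selects
 * SHA1 over MD5), ALGO_CONST for the first block and CLOSE_HASH for the
 * last one.
 */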
static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (unlikely(!ctx->digcnt)) {

		clk_enable(dd->iclk);

		if (!(dd->flags & FLAGS_INIT)) {
			omap_sham_write_mask(dd, SHA_REG_MASK,
				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
						SHA_REG_SYSSTATUS_RESETDONE)) {
				clk_disable(dd->iclk);
				return -ETIMEDOUT;
			}
			dd->flags |= FLAGS_INIT;
		}
	} else {
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
	}

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & FLAGS_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);

	return 0;
}
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int err, count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	err = omap_sham_write_ctrl(dd, length, final, 0);
	if (err)
		return err;

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int err, len32;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	/* flush cache entries related to our page */
	if (dma_addr == ctx->dma_addr)
		dma_sync_single_for_device(dd->dev, dma_addr, length,
					   DMA_TO_DEVICE);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma,
				OMAP_DMA_DST_SYNC_PREFETCH);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

	omap_set_dma_dest_params(dd->dma_lch, 0,
				 OMAP_DMA_AMODE_CONSTANT,
				 dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
				     OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
				    OMAP_DMA_DATA_BURST_4);

	err = omap_sham_write_ctrl(dd, length, final, 1);
	if (err)
		return err;

	ctx->digcnt += length;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= FLAGS_DMA_ACTIVE;

	omap_start_dma(dd->dma_lch);

	return -EINPROGRESS;
}
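
/*
 * Data is staged in a page-sized bounce buffer (ctx->buffer) that is DMA
 * mapped once at init time; the helpers below fill it from the request
 * scatterlist.
 */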
static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
	}

	return 0;
}
static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length;
	int err;

	ctx->flags |= FLAGS_FAST;

	length = min(ctx->total, sg_dma_len(ctx->sg));
	ctx->total = length;

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->total -= length;

	err = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
	if (err != -EINPROGRESS)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return err;
}
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);
	if (ctx->flags & FLAGS_FAST)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return 0;
}
static void omap_sham_cleanup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	unsigned long flags;

	spin_lock_irqsave(&dd->lock, flags);
	if (ctx->flags & FLAGS_CLEAN) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return;
	}
	ctx->flags |= FLAGS_CLEAN;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (ctx->digcnt) {
		clk_disable(dd->iclk);
		memcpy(req->result, ctx->digest, (ctx->flags & FLAGS_SHA1) ?
				SHA1_DIGEST_SIZE : MD5_DIGEST_SIZE);
	}

	if (ctx->dma_addr)
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);

	if (ctx->buffer)
		free_page((unsigned long)ctx->buffer);

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
}
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;
	ctx->flags |= FLAGS_FIRST;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= FLAGS_SHA1;

	ctx->bufcnt = 0;
	ctx->digcnt = 0;

	ctx->buflen = PAGE_SIZE;
	ctx->buffer = (void *)__get_free_page(
				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
				GFP_KERNEL : GFP_ATOMIC);
	if (!ctx->buffer)
		return -ENOMEM;

	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		free_page((unsigned long)ctx->buffer);
		return -EINVAL;
	}

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= FLAGS_HMAC;
	}

	return 0;
}
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);

	if (ctx->flags & FLAGS_CPU)
		err = omap_sham_update_cpu(dd);
	else if (ctx->flags & FLAGS_FAST)
		err = omap_sham_update_dma_fast(dd);
	else
		err = omap_sham_update_dma_slow(dd);

	/* wait for DMA completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
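
/*
 * Finish HMAC: the hardware produced the inner hash H(ipad || msg) in
 * ctx->digest; compute the outer hash H(opad || inner) with a synchronous
 * shash on the CPU (per RFC 2104).
 */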
static int omap_sham_finish_req_hmac(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!err) {
		omap_sham_copy_hash(ctx->dd->req, 1);
		if (ctx->flags & FLAGS_HMAC)
			err = omap_sham_finish_req_hmac(req);
	} else {
		ctx->flags |= FLAGS_ERROR;
	}

	if ((ctx->flags & FLAGS_FINAL) || err)
		omap_sham_cleanup(req);

	clear_bit(FLAGS_BUSY, &ctx->dd->flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
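
/*
 * Take the next request off the queue and start it. FLAGS_BUSY serializes
 * the hardware: only one request runs at a time, and the bit is cleared in
 * omap_sham_finish_req() when the request completes.
 */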
static int omap_sham_handle_queue(struct omap_sham_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	struct ahash_request *req, *prev_req;
	unsigned long flags;
	int err = 0;

	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
		return 0;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return 0;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);

	prev_req = dd->req;
	dd->req = req;

	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	if (req != prev_req && ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}

	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		tasklet_schedule(&dd->queue_task);
	}

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return err;
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;
	unsigned long flags;
	int err;

	ctx->op = op;

	spin_lock_irqsave(&dd->lock, flags);
	err = ahash_enqueue_request(&dd->queue, req);
	spin_unlock_irqrestore(&dd->lock, flags);

	omap_sham_handle_queue(dd);

	return err;
}
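
/*
 * update() picks a transfer strategy per request: totals below 9 bytes are
 * deferred to the software fallback in final(), small updates (<= 64 bytes)
 * go over PIO (FLAGS_CPU), a single aligned scatterlist entry can be DMA'd
 * directly (FLAGS_FAST), and everything else is staged through the bounce
 * buffer (slow DMA path).
 */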
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & FLAGS_FINUP) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with buffers >= 9;
			 * will switch to bypass in final();
			 * final has the same request and data
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= 64) {
			ctx->flags |= FLAGS_CPU;
		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
			/* maybe we can use faster functions */
			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
						 sizeof(u32));

			if (aligned && (ctx->flags & FLAGS_FIRST))
				/* digest: first and final */
				ctx->flags |= FLAGS_FAST;

			ctx->flags &= ~FLAGS_FIRST;
		}
	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
		/* if not finup -> not fast */
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}
static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->flags |= FLAGS_FINUP;

	if (!(ctx->flags & FLAGS_ERROR)) {
		/* OMAP HW accel works only with buffers >= 9 */
		/* HMAC is always >= 9 because of ipad */
		if ((ctx->digcnt + ctx->bufcnt) < 9)
			err = omap_sham_final_shash(req);
		else if (ctx->bufcnt)
			return omap_sham_enqueue(req, OP_FINAL);
	}

	omap_sham_cleanup(req);

	return err;
}
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= FLAGS_FINUP;

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
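
/*
 * Prepare the HMAC key per RFC 2104: hash keys longer than the block size
 * down to the digest size, zero-pad to the block size, then derive the
 * ipad/opad blocks by XORing with 0x36/0x5c.
 */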
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return 0;
}
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
		       alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx));

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
			       alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}
*tfm
)
897 return omap_sham_cra_init_alg(tfm
, NULL
);
900 static int omap_sham_cra_sha1_init(struct crypto_tfm
*tfm
)
902 return omap_sham_cra_init_alg(tfm
, "sha1");
905 static int omap_sham_cra_md5_init(struct crypto_tfm
*tfm
)
907 return omap_sham_cra_init_alg(tfm
, "md5");
static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}
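
/*
 * Algorithms registered with the crypto API: "sha1", "md5", "hmac(sha1)"
 * and "hmac(md5)". CRYPTO_ALG_NEED_FALLBACK lets requests the hardware
 * cannot handle (e.g. totals below 9 bytes) fall back to software.
 */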
static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int ready = 0, err = 0;

	if (ctx->flags & FLAGS_OUTPUT_READY) {
		ctx->flags &= ~FLAGS_OUTPUT_READY;
		ready = 1;
	}

	if (dd->flags & FLAGS_DMA_ACTIVE) {
		dd->flags &= ~FLAGS_DMA_ACTIVE;
		omap_sham_update_dma_stop(dd);
		err = omap_sham_update_dma_slow(dd);
	}

	err = dd->err ? : err;

	if (err != -EINPROGRESS && (ready || err)) {
		dev_dbg(dd->dev, "update done: err: %d\n", err);
		/* finish current request */
		omap_sham_finish_req(req, err);
		/* start new request */
		omap_sham_handle_queue(dd);
	}
}
static void omap_sham_queue_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;

	omap_sham_handle_queue(dd);
}
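
/*
 * IRQ handler: acknowledge OUTPUT_READY, mark the context so done_task can
 * read out the digest, and defer the rest to tasklet context.
 */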
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (!ctx) {
		dev_err(dd->dev, "unknown interrupt.\n");
		return IRQ_HANDLED;
	}

	if (unlikely(ctx->flags & FLAGS_FINAL))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	ctx->flags |= FLAGS_OUTPUT_READY;
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}
static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
		dd->err = -EIO;
		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
	}

	tasklet_schedule(&dd->done_task);
}
static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		return err;
	}

	return 0;
}

static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}
static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	static struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};

static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	if (!cpu_class_is_omap2() ||
		omap_type() != OMAP2_DEVICE_TYPE_SEC) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");