/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Interface structure taken from omap-sham driver
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64
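
/*
 * Requests smaller than IMG_HASH_DMA_THRESHOLD bytes are staged in the
 * per-request buffer and written to the data port by the CPU; anything
 * larger is streamed to the same port through the slave DMA channel
 * (see img_hash_process_data()).
 */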
#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif
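
/*
 * The data port is 32 bits wide, so the control register's byte-order
 * field is chosen per host endianness (3210 on little-endian, 0123 on
 * big-endian) so that the engine unpacks each word into the same byte
 * stream the caller supplied. This rationale is inferred from the
 * register names rather than taken from hardware documentation.
 */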
struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	/* DMA state */
	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[0] __aligned(sizeof(u32));
};
struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	struct crypto_ahash	*fallback;
};
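
/*
 * Each device exposes two MMIO regions: io_base maps the control/status
 * register bank (platform resource 0), while cpu_addr/bus_addr describe
 * the data write port (platform resource 1), which is either written
 * directly by the CPU or used as the DMA slave destination address.
 */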
struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};
struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};
static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};
static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}
static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}
static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non DMA mode, to
	 * ensure the first data write is not grouped in burst with the control
	 * register write a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}
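
/*
 * Worked example of the control word built above: SHA-256 on a
 * little-endian host gives cr = (CR_CONTROL_BYTE_ORDER_3210 << 8) |
 * CR_CONTROL_ALGO_SHA256 = 0x003; on a big-endian host it would be
 * (CR_CONTROL_BYTE_ORDER_0123 << 8) | CR_CONTROL_ALGO_SHA256 = 0x103.
 */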
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	/* The write port is word-wide, so round the length up to u32s. */
	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}
static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}
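
/*
 * On DMA completion img_hash_dma_callback() runs: it pushes any sub-word
 * bytes staged in ctx->buffer through the CPU write port and, if more
 * scatterlist entries remain, re-schedules dma_task for the next chunk.
 */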
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}
static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	/* Words are read out of the result queue in reverse order. */
	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}
static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;
	else
		hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req || !ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		/* Stage the trailing sub-word bytes for the next transfer. */
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}
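
/*
 * Worked example of the rounding above: an sg entry of 1027 bytes gives
 * wsend = 256 words (1024 bytes) for the DMA transfer and bleft = 3 bytes,
 * which are copied into ctx->buffer and written out ahead of the next
 * chunk so that no padding ever reaches the hash engine mid-message.
 */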
static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}
static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	/* The message length is programmed in bits, split into two words. */
	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}
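
/*
 * Example: a 100-byte request programs nbits = 800 = 0x320, so
 * CR_MESSAGE_LENGTH_L is written with 0x320 and CR_MESSAGE_LENGTH_H with 0.
 * The high word only comes into play for messages larger than 512 MiB.
 */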
static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);

	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}
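
/*
 * Request flow: img_hash_digest() enqueues a request and calls the handler
 * above, which programs the hardware (img_hash_hw_init) and feeds the data
 * via CPU or DMA. The "new results" interrupt then schedules done_task,
 * which reads the result queue and completes the request through
 * img_hash_finish_req().
 */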
static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}
static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}
static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}
static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}
static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}
	spin_unlock(&img_hash.lock);
	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		return err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}
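
/*
 * The request size registered above reserves room for this driver's
 * request context, the fallback's own request context, and
 * IMG_HASH_DMA_THRESHOLD bytes backing the zero-length buffer[] member
 * used to stage CPU writes and sub-word DMA remainders.
 */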
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}
static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}
static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
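
/*
 * A minimal sketch of how a kernel consumer would reach these algorithms
 * through the generic ahash API (illustrative only, not part of this
 * driver; done_cb, data and len are placeholders and error handling is
 * trimmed):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, NULL);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	crypto_ahash_digest(req);
 *
 * When "img-sha256" is the highest-priority sha256 implementation, the
 * crypto core dispatches such requests to img_hash_digest() above.
 */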
static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	/* Unwind any algorithms registered before the failure. */
	while (i--)
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}
static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					 DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}
static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);
static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	hdev->io_base = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}
static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};
static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");