/*
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-sham.c drivers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA224		BIT(19)
#define SHA_FLAGS_SHA256		BIT(20)
#define SHA_FLAGS_SHA384		BIT(21)
#define SHA_FLAGS_SHA512		BIT(22)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)

#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		PAGE_SIZE

#define ATMEL_SHA_DMA_THRESHOLD		56
struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
};

struct atmel_sha_dev;

struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int	offset;	/* offset in current sg */
	unsigned int	total;	/* total request */

	size_t	block_size;

	u8	buffer[0] __aligned(sizeof(u32));
};

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	/* fallback stuff */
	struct crypto_shash	*fallback;
};

#define ATMEL_SHA_QUEUE_LENGTH	50
struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	u32	hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};
static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};
static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0)
			break;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) holding the message length in bits is
 * appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 *  - if message length < 112 bytes then padlen = 112 - message length
 *  - else padlen = 128 + 112 - message length
 */
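/*
 * Worked example (illustrative only, not part of the original comment):
 * for a SHA1/SHA224/SHA256 request with 20 bytes buffered, index = 20
 * and padlen = 56 - 20 = 36. The padded block then holds the 20 message
 * bytes, the 0x80 byte, 35 zero bytes and the 8-byte big-endian bit
 * length (20 * 8 = 160), i.e. 20 + 36 + 8 = 64 bytes, exactly one block.
 */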
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}
static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;

	if (likely(dma)) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	if (ctx->flags & SHA_FLAGS_SHA1)
		valmr |= SHA_MR_ALGO_SHA1;
	else if (ctx->flags & SHA_FLAGS_SHA224)
		valmr |= SHA_MR_ALGO_SHA224;
	else if (ctx->flags & SHA_FLAGS_SHA256)
		valmr |= SHA_MR_ALGO_SHA256;
	else if (ctx->flags & SHA_FLAGS_SHA384)
		valmr |= SHA_MR_ALGO_SHA384;
	else if (ctx->flags & SHA_FLAGS_SHA512)
		valmr |= SHA_MR_ALGO_SHA512;

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1]))
		valcr = SHA_CR_FIRST;

	atmel_sha_write(dd, SHA_CR, valcr);
	atmel_sha_write(dd, SHA_MR, valmr);
}
static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
			SHA_FLAGS_SHA256)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 16;
		dd->dma_lch_in.dma_conf.dst_maxburst = 16;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = 32;
		dd->dma_lch_in.dma_conf.dst_maxburst = 32;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return -EINVAL;

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
				ctx->block_size);
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
		 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}
static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %u bytes error\n",
				ctx->buflen + ctx->block_size);
			return -EINVAL;
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
					DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
					0, final);
}
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
			err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	} else {
		/* faster to handle last block with cpu */
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	if (ctx->flags & SHA_FLAGS_SHA1)
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	else if (ctx->flags & SHA_FLAGS_SHA224)
		for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	else if (ctx->flags & SHA_FLAGS_SHA256)
		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	else if (ctx->flags & SHA_FLAGS_SHA384)
		for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	else
		for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	if (ctx->flags & SHA_FLAGS_SHA1)
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA224)
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA256)
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA384)
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
	else
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}
static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return err;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable_unprepare(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	clk_prepare_enable(dd->iclk);

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
	}

	return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}
static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;
	int err = 0;

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->bufcnt) {
		return atmel_sha_enqueue(req, SHA_OP_FINAL);
	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
		err = atmel_sha_hw_init(dd);
		if (err)
			goto err1;

		dd->flags |= SHA_FLAGS_BUSY;
		err = atmel_sha_final_req(dd);
	} else {
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	return err;
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
				alg_name);
		return PTR_ERR(tctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);

	return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	return atmel_sha_cra_init_alg(tfm, NULL);
}

static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}
static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
};
static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (!(SHA_FLAGS_BUSY & dd->flags)) {
		atmel_sha_handle_queue(dd, NULL);
		return;
	}

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	return 0;

err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}
static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	if (pdata && pdata->dma_slave->rxdata.dma_dev) {
		/* Try to grab DMA channel */
		dma_cap_zero(mask_in);
		dma_cap_set(DMA_SLAVE, mask_in);

		dd->dma_lch_in.chan = dma_request_channel(mask_in,
				atmel_sha_filter, &pdata->dma_slave->rxdata);

		if (!dd->dma_lch_in.chan)
			return err;

		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
			SHA_REG_DIN(0);
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_in.dma_conf.device_fc = false;

		return 0;
	}

	return -ENODEV;
}

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}
static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	case 0x320:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}
static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data	*pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	unsigned long sha_phys_size;
	int err;

	sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;
	sha_phys_size = resource_size(sha_res);

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
						sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto clk_err;
	}

	sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
	if (!sha_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto sha_io_err;
	}

	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			dev_err(&pdev->dev, "platform data not available\n");
			err = -ENXIO;
			goto err_sha_dma;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256\n");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
	iounmap(sha_dd->io_base);
sha_io_err:
	clk_put(sha_dd->iclk);
clk_err:
	free_irq(sha_dd->irq, sha_dd);
res_err:
	tasklet_kill(&sha_dd->done_task);
	kfree(sha_dd);
	sha_dd = NULL;
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int atmel_sha_remove(struct platform_device *pdev)
{
	static struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;

	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->done_task);

	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

	iounmap(sha_dd->io_base);

	clk_put(sha_dd->iclk);

	if (sha_dd->irq >= 0)
		free_irq(sha_dd->irq, sha_dd);

	kfree(sha_dd);
	sha_dd = NULL;

	return 0;
}
static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(atmel_sha_driver);
MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");