/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}
static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;

	creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				     &dreq->cache_dma);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}
static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}
static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int ret;

	if (creq->cache)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
	else
		ret = mv_cesa_ahash_std_alloc_cache(creq, flags);

	return ret;
}
static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
{
	dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
		      creq->req.dma.cache_dma);
}
static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
{
	kfree(creq->cache);
}
static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
{
	if (!creq->cache)
		return;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_free_cache(creq);
	else
		mv_cesa_ahash_std_free_cache(creq);

	creq->cache = NULL;
}
static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}
static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}
static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_dma_cleanup(&creq->req.dma.base);
}
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_free_cache(creq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}
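/*
 * mv_cesa_ahash_pad_len() returns the size of the standard MD5/SHA
 * trailer gap: the number of padding bytes (0x80 marker included)
 * needed to reach 56 mod 64, so that the final 8-byte bit-length
 * field completes a 64-byte block. E.g. for creq->len == 60, index
 * is 60 and padlen is 64 + 56 - 60 == 60; 60 + 60 + 8 == 128 bytes,
 * i.e. exactly two blocks.
 */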
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
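/*
 * The complete trailer written above is thus "0x80, zeros, 64-bit
 * message length in bits", padlen + 8 bytes in total. MD5 stores the
 * length little endian while SHA1/SHA256 store it big endian, which
 * is what the creq->algo_le flag selects.
 */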
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;

	if (creq->cache_ptr)
		memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache,
		       creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy(creq->cache,
				       engine->sram +
				       CESA_SA_DATA_SRAM_OFFSET + len,
				       new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
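/*
 * Fragment mode transitions performed above, in short:
 *
 *   FIRST_FRAG -> NOT_FRAG   whole request fits in one shot
 *   FIRST_FRAG -> MID_FRAG   more data (or padding) still to come
 *   MID_FRAG   -> LAST_FRAG  closing chunk of a multi-chunk hash
 *
 * NOT_FRAG/LAST_FRAG make the engine append the padding itself, which
 * is only possible while creq->len fits in the MAC_SRC total-length
 * field; longer requests get software-generated padding from
 * mv_cesa_ahash_pad_req() instead.
 */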
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma.base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->req.base.engine;
	unsigned int digsize;
	int ret, i;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma.base, status);
	else
		ret = mv_cesa_ahash_std_process(ahashreq, status);

	if (ret == -EINPROGRESS)
		return ret;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	return ret;
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	unsigned int digsize;
	int i;

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.prepare = mv_cesa_ahash_prepare,
	.cleanup = mv_cesa_ahash_req_cleanup,
};
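/*
 * These hooks are driven by the CESA core: roughly, ->prepare() runs
 * when a queued request is assigned to an engine, ->step() launches
 * (or relaunches) the engine on the next chunk, ->process() is called
 * from the interrupt path and returns -EINPROGRESS while chunks
 * remain, and ->cleanup() releases the per-request resources once the
 * request has completed.
 */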
static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;

	return 0;
}
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
	    !creq->last_req) {
		ret = mv_cesa_ahash_alloc_cache(req);
		if (ret)
			return ret;
	}

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}
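/*
 * Partial blocks are accumulated in creq->cache rather than sent to
 * the engine: e.g. a 10-byte update on an empty cache is simply
 * copied and *cached is set, so no hardware round-trip happens until
 * a full 64-byte block (or the final request) is available.
 */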
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;

	if (!creq->cache_ptr)
		return 0;

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl,
					  frag_len + len, flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
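/*
 * Note that the software-generated trailer may not fit in the SRAM
 * payload together with the remaining data. In that case it is split:
 * the part that fits (len) is hashed as a mid fragment along with the
 * data, and the remainder (trailerlen - padoff) is transferred on its
 * own and hashed as the closing fragment.
 */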
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&dreq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&dreq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (cesa_dev->caps->has_tdma)
		creq->req.base.type = CESA_DMA_REQ;
	else
		creq->req.base.type = CESA_STD_REQ;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	if (*cached)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (ret && ret != -EINPROGRESS) {
		mv_cesa_ahash_cleanup(req);
		return ret;
	}

	return ret;
}
static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (ret && ret != -EINPROGRESS)
		mv_cesa_ahash_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (ret && ret != -EINPROGRESS)
		mv_cesa_ahash_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	if (creq->cache)
		memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
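/*
 * Caller-side sketch (hypothetical, not part of this driver) of state
 * save/restore through the export/import hooks above, assuming an
 * already-allocated ahash_request "req" backed by one of the mv-*
 * algorithms:
 *
 *	struct md5_state state;
 *
 *	crypto_ahash_update(req);		hash some data
 *	crypto_ahash_export(req, &state);	snapshot digest, len, cache
 *	...
 *	crypto_ahash_import(req, &state);	resume where we left off
 *	crypto_ahash_final(req);
 */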
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}
static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}
static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}
static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
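/*
 * Registered under cra_name "md5" at priority 300, so this
 * implementation takes precedence over the generic software md5 when
 * callers allocate the hash by name. The same holds for the
 * sha1/sha256 and hmac variants below.
 */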
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}
static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}
static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}
static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}
struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};
static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);

	return ret;
}
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}
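/*
 * This implements the standard HMAC key schedule (RFC 2104):
 * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)), where K' is the
 * key zero-padded (or first hashed, if longer than a block) to the
 * block size, ipad is 0x36 repeated and opad is 0x5c repeated. The
 * two xored pads produced here are then hashed once each, so that
 * only the intermediate hash states need to be kept.
 */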
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
*tfm
)
1205 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1207 ctx
->base
.ops
= &mv_cesa_ahash_req_ops
;
1209 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1210 sizeof(struct mv_cesa_ahash_req
));
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}
static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}
static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};