/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;
        u32 digest;

        u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;

        u8 state_sz;    /* expected state size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];

        u64 len;
        u64 processed;

        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
        u64 len;
        u64 processed;

        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
        u8 cache[SHA256_BLOCK_SIZE];
};

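/*
 * Build the two-instruction token accompanying a hash command: the first
 * instruction directs input_length bytes of packet data through the hash
 * engine, the second inserts the resulting result_length-byte digest into
 * the output.
 */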
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

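/*
 * Fill in the context-control words of the first command descriptor:
 * algorithm, digest type and hash state size, and whether the engine
 * restarts from the standard IV or continues from the saved state. For
 * HMAC, the precomputed inner and outer digests are copied into the
 * context record.
 */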
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc,
                                     unsigned int digestsize,
                                     unsigned int blocksize)
{
        int i;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
        cdesc->control_data.control0 |= ctx->alg;
        cdesc->control_data.control0 |= ctx->digest;

        if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                if (req->processed) {
                        if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                        else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                                 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

                        cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
                } else {
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
                }

                if (!req->finish)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

                /*
                 * Copy the input digest if needed, and setup the context
                 * fields. Do this now as we need it to setup the first command
                 * descriptor.
                 */
                if (req->processed) {
                        for (i = 0; i < digestsize / sizeof(u32); i++)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

                        if (req->finish)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
                }
        } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

                memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
                memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
                       ctx->opad, digestsize);
        }
}

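/*
 * Completion path for a regular hash request: reclaim the result
 * descriptor, copy the (intermediate or final) digest into the request
 * state, unmap the source scatterlist and carry over any bytes cached
 * for the next update.
 */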
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
        int cache_len, result_sz = sreq->state_sz;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (sreq->finish)
                result_sz = crypto_ahash_digestsize(ahash);
        memcpy(sreq->state, areq->result, result_sz);

        dma_unmap_sg(priv->dev, areq->src,
                     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

        safexcel_free_context(priv, async, sreq->state_sz);

        cache_len = sreq->len - sreq->processed;
        if (cache_len)
                memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;

        return 1;
}

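/*
 * Build the descriptor chain for one hash request: a command descriptor
 * for previously cached bytes (if any), one per source scatterlist entry,
 * and a single result descriptor for the digest. Trailing bytes that do
 * not fill a whole block are copied into cache_next for the next send().
 */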
static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
                               struct safexcel_request *request, int *commands,
                               int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

        queued = len = req->len - req->processed;
        if (queued < crypto_ahash_blocksize(ahash))
                cache_len = queued;
        else
                cache_len = queued - areq->nbytes;

        /*
         * If this is not the last request and the queued data does not fit
         * into full blocks, cache it for the next send() call.
         */
        extra = queued & (crypto_ahash_blocksize(ahash) - 1);
        if (!req->last_req && extra) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache_next, extra, areq->nbytes - extra);

                queued -= extra;
                len -= extra;
        }

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
                ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
                if (!ctx->base.cache) {
                        ret = -ENOMEM;
                        goto unlock;
                }

                memcpy(ctx->base.cache, req->cache, cache_len);
                ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
                                                     cache_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
                        ret = -EINVAL;
                        goto free_cache;
                }

                ctx->base.cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
                                                 ctx->base.cache_dma, cache_len,
                                                 len, ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
                        goto unmap_cache;
                }
                n_cdesc++;

                queued -= cache_len;
                if (!queued)
                        goto send_command;
        }

        /* Now handle the current ahash request buffer(s) */
        nents = dma_map_sg(priv->dev, areq->src,
                           sg_nents_for_len(areq->src, areq->nbytes),
                           DMA_TO_DEVICE);
        if (!nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }

        for_each_sg(areq->src, sg, nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - sglen < 0)
                        sglen = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen), sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1)
                        first_cdesc = cdesc;

                queued -= sglen;
                if (!queued)
                        break;
        }

send_command:
        /* Setup the context options */
        safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
                                 crypto_ahash_blocksize(ahash));

        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);

        ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
                                              req->state_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
                ret = -EINVAL;
                goto cdesc_rollback;
        }

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
                                   req->state_sz);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        req->processed += len;
        request->req = &areq->base;
        ctx->base.handle_result = safexcel_handle_result;

        *commands = n_cdesc;
        *results = 1;

        return 0;

cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
        if (ctx->base.cache_dma) {
                dma_unmap_single(priv->dev, ctx->base.cache_dma,
                                 ctx->base.cache_sz, DMA_TO_DEVICE);
                ctx->base.cache_sz = 0;
        }
free_cache:
        if (ctx->base.cache) {
                kfree(ctx->base.cache);
                ctx->base.cache = NULL;
        }

unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}

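/*
 * Check whether the hardware context record still matches the request:
 * if the saved digest words or the processed-block counter differ, the
 * context must be invalidated before it can be reused.
 */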
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        unsigned int state_w_sz = req->state_sz / sizeof(u32);
        int i;

        for (i = 0; i < state_w_sz; i++)
                if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
                        return true;

        if (ctx->base.ctxr->data[state_w_sz] !=
            cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
                return true;

        return false;
}

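/*
 * Completion path for a context-invalidation request: free the context
 * record when the tfm is being torn down, otherwise re-enqueue the
 * original request on a fresh ring now that the stale context is gone.
 */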
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        int enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: invalidate: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;
                return 1;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;
        ctx->base.needs_inv = false;
        ctx->base.send = safexcel_ahash_send;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        *should_complete = false;

        return 1;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;

        ctx->base.handle_result = safexcel_handle_inv_result;
        ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (ret)
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct ahash_request req;
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(&req, 0, sizeof(struct ahash_request));

        /* create invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req.base.tfm);
        ctx->base.exit_inv = true;
        ctx->base.send = safexcel_ahash_send_inv;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev, "hash: completion error (%d)\n",
                         result.error);
                return result.error;
        }

        return 0;
}

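/*
 * Copy new request data into the request-local cache while the total
 * pending data still fits within a single hash block; the engine is only
 * ever fed whole blocks.
 */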
static int safexcel_ahash_cache(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        int queued, cache_len;

        cache_len = req->len - areq->nbytes - req->processed;
        queued = req->len - req->processed;

        /*
         * In case there aren't enough bytes to proceed (less than a
         * block size), cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,
                                   areq->nbytes, 0);
                return areq->nbytes;
        }

        /* We couldn't cache all the data */
        return queued;
}

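/*
 * Queue a request on its ring, allocating a context record on first use
 * and switching to the invalidation path when the saved hardware state
 * no longer matches the request state.
 */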
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        ctx->base.send = safexcel_ahash_send;

        if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv)
                        ctx->base.send = safexcel_ahash_send_inv;
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

        /* If the request is 0 length, do nothing */
        if (!areq->nbytes)
                return 0;

        req->len += areq->nbytes;

        safexcel_ahash_cache(areq);

        /*
         * We're not doing partial updates when performing an hmac request.
         * Everything will be handled by the final() call.
         */
        if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
                return 0;

        if (req->hmac)
                return safexcel_ahash_enqueue(areq);

        if (!req->last_req &&
            req->len - req->processed > crypto_ahash_blocksize(ahash))
                return safexcel_ahash_enqueue(areq);

        return 0;
}

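/*
 * The digest of a zero-length message is a known constant, so final()
 * can return the precomputed zero-message hash directly instead of
 * going through the engine.
 */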
static int safexcel_ahash_final(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->last_req = true;
        req->finish = true;

        /* If we have an overall 0 length request */
        if (!(req->len + areq->nbytes)) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                               SHA1_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                               SHA224_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,
                               SHA256_DIGEST_SIZE);

                return 0;
        }

        return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->last_req = true;
        req->finish = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        memcpy(export->state, req->state, req->state_sz);
        memset(export->cache, 0, crypto_ahash_blocksize(ahash));
        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

        return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;
        int ret;

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req->len = export->len;
        req->processed = export->processed;

        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
        memcpy(req->state, export->state, req->state_sz);

        return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));

        return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA1_H0;
        req->state[1] = SHA1_H1;
        req->state[2] = SHA1_H2;
        req->state[3] = SHA1_H3;
        req->state[4] = SHA1_H4;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        ret = safexcel_ahash_exit_inv(tfm);
        if (ret)
                dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "safexcel-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        safexcel_sha1_init(areq);
        ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;

        return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_hmac_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

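/*
 * HMAC(key, msg) = H((key ^ opad) || H((key ^ ipad) || msg)).
 *
 * The helpers below run the inner and outer key pads through a software
 * ahash request once at setkey() time, so the engine can later be loaded
 * with the two precomputed intermediate digests instead of the raw key.
 */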
struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

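/*
 * Prepare the HMAC inner and outer pads: keys longer than a block are
 * first hashed down to digest size, then the zero-padded key is XORed
 * byte by byte with the ipad (0x36) and opad (0x5c) constants.
 */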
static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
{
        struct safexcel_ahash_result result;
        struct scatterlist sg;
        int ret, i;
        u8 *keydup;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                memzero_explicit(keydup, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
{
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req = ahash_request_ctx(areq);
        req->hmac = true;
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        return crypto_ahash_export(areq, state);
}

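/*
 * Compute the HMAC istate/ostate for a given key by driving this
 * driver's own ahash implementation through the generic crypto API.
 */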
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
                                unsigned int keylen, void *istate, void *ostate)
{
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad, *opad;
        int ret;

        tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!areq) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
        }

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
        kfree(ipad);
free_request:
        ahash_request_free(areq);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

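/*
 * If the new precomputed pads differ from what is already loaded in the
 * hardware context record, the record must be invalidated before reuse.
 */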
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct safexcel_ahash_export_state istate, ostate;
        int ret, i;

        ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                        ctx->base.needs_inv = true;
                        break;
                }
        }

        memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
        memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

        return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_hmac_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_hmac_sha1_digest,
                .setkey = safexcel_hmac_sha1_setkey,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "safexcel-hmac-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA256_H0;
        req->state[1] = SHA256_H1;
        req->state[2] = SHA256_H2;
        req->state[3] = SHA256_H3;
        req->state[4] = SHA256_H4;
        req->state[5] = SHA256_H5;
        req->state[6] = SHA256_H6;
        req->state[7] = SHA256_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha256_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha256_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha256_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "safexcel-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA224_H0;
        req->state[1] = SHA224_H1;
        req->state[2] = SHA224_H2;
        req->state[3] = SHA224_H3;
        req->state[4] = SHA224_H4;
        req->state[5] = SHA224_H5;
        req->state[6] = SHA224_H6;
        req->state[7] = SHA224_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha224_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

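/*
 * SHA-224 shares the SHA-256 compression function, so the request keeps
 * a full SHA-256-sized internal state; only the advertised digestsize
 * differs.
 */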
struct safexcel_alg_template safexcel_alg_sha224 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha224_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha224_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "safexcel-sha224",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};