/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
        ENGINE_IDLE,
        ENGINE_BUSY,
        ENGINE_W_DEQUEUE,
};
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:          sg iterator for src
 * @dst_sg_it:          sg iterator for dst
 * @sg_src_left:        bytes left in src to process (scatter list)
 * @src_start:          offset to add to src start position (scatter list)
 * @crypt_len:          length of current crypt process
 * @hw_nbytes:          total bytes to process in hw for this request
 * @sg_dst_left:        bytes left in dst to process in this scatter list
 * @dst_start:          offset to add to dst start position (scatter list)
 * @total_req_bytes:    total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep track
 * of progress within the current scatterlist.
 */
struct req_progress {
        struct sg_mapping_iter src_sg_it;
        struct sg_mapping_iter dst_sg_it;

        /* src mostly */
        int sg_src_left;
        int src_start;
        int crypt_len;
        int hw_nbytes;
        /* dst mostly */
        int sg_dst_left;
        int dst_start;
        int total_req_bytes;
};
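/*
 * Worked example with hypothetical sizes: a 12 KiB request arriving in a
 * single 12 KiB scatterlist entry, with only 8 KiB of usable SRAM, is
 * processed in two passes. Pass one sets crypt_len = 8 KiB, leaving
 * sg_src_left = 4 KiB and src_start = 8 KiB; pass two consumes the rest,
 * and total_req_bytes == hw_nbytes marks the request complete.
 */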
struct crypto_priv {
        void __iomem *reg;
        void __iomem *sram;
        int irq;
        struct task_struct *queue_th;

        /* the lock protects queue and eng_st */
        spinlock_t lock;
        struct crypto_queue queue;
        enum engine_status eng_st;
        struct crypto_async_request *cur_req;
        struct req_progress p;
        int max_req_size;
        int sram_size;
};
static struct crypto_priv *cpg;
struct mv_ctx {
        u8 aes_enc_key[AES_KEY_LEN];
        u32 aes_dec_key[8];
        int key_len;
        u32 need_calc_aes_dkey;
};

enum crypto_op {
        COP_AES_ECB,
        COP_AES_CBC,
};

struct mv_req_ctx {
        enum crypto_op op;
        int decrypt;
};
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
        struct crypto_aes_ctx gen_aes_key;
        int key_pos;

        if (!ctx->need_calc_aes_dkey)
                return;

        crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

        key_pos = ctx->key_len + 24;
        memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
        switch (ctx->key_len) {
        case AES_KEYSIZE_256:
                key_pos -= 2;
                /* fall through */
        case AES_KEYSIZE_192:
                key_pos -= 2;
                memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
                       4 * 4);
                break;
        }
        ctx->need_calc_aes_dkey = 0;
}
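/*
 * Why key_pos = key_len + 24 points at the decryption key: AES with a
 * key_len byte key runs Nr = key_len/4 + 6 rounds, and the last round
 * key starts at word 4 * Nr = key_len + 24 of the expanded encryption
 * schedule (AES-128: word 40 of 44, AES-256: word 56 of 60). Handing
 * the engine this tail of the schedule presumably lets it run the key
 * schedule backwards for decryption.
 */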
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
                         unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

        switch (len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                break;
        default:
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->key_len = len;
        ctx->need_calc_aes_dkey = 1;

        memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
        return 0;
}
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
        int ret;
        void *sbuf;
        int copied = 0;

        while (1) {
                /* refill from the next scatterlist entry when drained */
                if (!p->sg_src_left) {
                        ret = sg_miter_next(&p->src_sg_it);
                        BUG_ON(!ret);
                        p->sg_src_left = p->src_sg_it.length;
                        p->src_start = 0;
                }

                sbuf = p->src_sg_it.addr + p->src_start;

                if (p->sg_src_left <= len - copied) {
                        /* current entry fits entirely into the buffer */
                        memcpy(dbuf + copied, sbuf, p->sg_src_left);
                        copied += p->sg_src_left;
                        p->sg_src_left = 0;
                        if (copied >= len)
                                break;
                } else {
                        /* buffer full; remember the offset for next time */
                        int copy_len = len - copied;

                        memcpy(dbuf + copied, sbuf, copy_len);
                        p->src_start += copy_len;
                        p->sg_src_left -= copy_len;
                        break;
                }
        }
}
static void setup_data_in(void)
{
        struct req_progress *p = &cpg->p;

        p->crypt_len =
            min(p->hw_nbytes - p->total_req_bytes, cpg->max_req_size);
        copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
                        p->crypt_len);
}
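/*
 * Example: with hw_nbytes = 10000 and a hypothetical max_req_size of
 * 8000, the first call selects crypt_len = 8000; once total_req_bytes
 * has advanced to 8000, the next call selects the remaining 2000 bytes.
 */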
static void mv_process_current_q(int first_block)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct sec_accel_config op;

        switch (req_ctx->op) {
        case COP_AES_ECB:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
                break;
        case COP_AES_CBC:
        default:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
                op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
                        ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
                if (first_block)
                        memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
                break;
        }
        if (req_ctx->decrypt) {
                op.config |= CFG_DIR_DEC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
                       AES_KEY_LEN);
        } else {
                op.config |= CFG_DIR_ENC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
                       AES_KEY_LEN);
        }

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                op.config |= CFG_AES_LEN_128;
                break;
        case AES_KEYSIZE_192:
                op.config |= CFG_AES_LEN_192;
                break;
        case AES_KEYSIZE_256:
                op.config |= CFG_AES_LEN_256;
                break;
        }
        op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
                ENC_P_DST(SRAM_DATA_OUT_START);
        op.enc_key_p = SRAM_DATA_KEY_P;

        setup_data_in();
        op.enc_len = cpg->p.crypt_len;
        memcpy(cpg->sram + SRAM_CONFIG, &op,
               sizeof(struct sec_accel_config));

        writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
        /* GO */
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

        /*
         * XXX: add timer if the interrupt does not occur for some mystery
         * reason.
         */
}
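/*
 * The SRAM layout used per chunk, as implied by the copies above (the
 * actual offsets come from mv_cesa.h): the operation descriptor sits at
 * SRAM_CONFIG, the round key at SRAM_DATA_KEY_P, the CBC IV at
 * SRAM_DATA_IV (the chained IV is written back at SRAM_DATA_IV_BUF),
 * input data at SRAM_DATA_IN_START and the engine's output at
 * SRAM_DATA_OUT_START. SEC_ACCEL_DESC_P0 is pointed at the descriptor
 * and the accelerator is kicked via SEC_ACCEL_CMD.
 */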
static void mv_crypto_algo_completion(void)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        if (req_ctx->op != COP_AES_CBC)
                return;

        /* CBC: hand the updated IV back to the caller */
        memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
static void dequeue_complete_req(void)
{
        struct crypto_async_request *req = cpg->cur_req;
        void *buf;
        int ret;
        int need_copy_len = cpg->p.crypt_len;
        int sram_offset = 0;

        cpg->p.total_req_bytes += cpg->p.crypt_len;
        do {
                int dst_copy;

                /* advance to the next dst scatterlist entry when drained */
                if (!cpg->p.sg_dst_left) {
                        ret = sg_miter_next(&cpg->p.dst_sg_it);
                        BUG_ON(!ret);
                        cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
                        cpg->p.dst_start = 0;
                }

                buf = cpg->p.dst_sg_it.addr;
                buf += cpg->p.dst_start;

                dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

                memcpy(buf,
                       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
                       dst_copy);
                sram_offset += dst_copy;
                cpg->p.sg_dst_left -= dst_copy;
                need_copy_len -= dst_copy;
                cpg->p.dst_start += dst_copy;
        } while (need_copy_len > 0);

        BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
        if (cpg->p.total_req_bytes < cpg->p.hw_nbytes) {
                /* process next scatter list entry */
                cpg->eng_st = ENGINE_BUSY;
                mv_process_current_q(0);
        } else {
                sg_miter_stop(&cpg->p.src_sg_it);
                sg_miter_stop(&cpg->p.dst_sg_it);
                mv_crypto_algo_completion();
                cpg->eng_st = ENGINE_IDLE;
                local_bh_disable();
                req->complete(req, 0);
                local_bh_enable();
        }
}
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
        int i = 0;
        size_t cur_len;

        while (1) {
                cur_len = sl[i].length;
                ++i;
                if (total_bytes > cur_len)
                        total_bytes -= cur_len;
                else
                        break;
        }

        return i;
}
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
        struct req_progress *p = &cpg->p;
        int num_sgs;

        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        p->hw_nbytes = req->nbytes;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        num_sgs = count_sgs(req->dst, req->nbytes);
        sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

        mv_process_current_q(1);
}
static int queue_manag(void *data)
{
        cpg->eng_st = ENGINE_IDLE;
        do {
                struct ablkcipher_request *req;
                struct crypto_async_request *async_req = NULL;
                struct crypto_async_request *backlog = NULL;

                __set_current_state(TASK_INTERRUPTIBLE);

                if (cpg->eng_st == ENGINE_W_DEQUEUE)
                        dequeue_complete_req();

                spin_lock_irq(&cpg->lock);
                if (cpg->eng_st == ENGINE_IDLE) {
                        backlog = crypto_get_backlog(&cpg->queue);
                        async_req = crypto_dequeue_request(&cpg->queue);
                        if (async_req) {
                                BUG_ON(cpg->eng_st != ENGINE_IDLE);
                                cpg->eng_st = ENGINE_BUSY;
                        }
                }
                spin_unlock_irq(&cpg->lock);

                if (backlog) {
                        backlog->complete(backlog, -EINPROGRESS);
                        backlog = NULL;
                }

                if (async_req) {
                        req = container_of(async_req,
                                           struct ablkcipher_request, base);
                        mv_enqueue_new_req(req);
                        async_req = NULL;
                }

                schedule();

        } while (!kthread_should_stop());

        return 0;
}
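/*
 * Design note: every state transition except BUSY -> W_DEQUEUE happens
 * in this thread, and the engine processes exactly one request at a
 * time, so the single lock around queue and eng_st is all the
 * synchronization the driver needs.
 */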
static int mv_handle_req(struct crypto_async_request *req)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cpg->lock, flags);
        ret = crypto_enqueue_request(&cpg->queue, req);
        spin_unlock_irqrestore(&cpg->lock, flags);
        wake_up_process(cpg->queue_th);

        return ret;
}
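/*
 * crypto_enqueue_request() returns -EINPROGRESS once the request is
 * queued, or -EBUSY when the queue is full (the request is then only
 * kept on the backlog if the caller set CRYPTO_TFM_REQ_MAY_BACKLOG), so
 * mv_handle_req() propagates exactly the status the async ablkcipher
 * API expects from encrypt/decrypt.
 */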
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
        return 0;
}
static irqreturn_t crypto_int(int irq, void *priv)
{
        u32 val;

        val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
        if (!(val & SEC_INT_ACCEL0_DONE))
                return IRQ_NONE;

        val &= ~SEC_INT_ACCEL0_DONE;
        writel(val, cpg->reg + FPGA_INT_STATUS);
        writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
        BUG_ON(cpg->eng_st != ENGINE_BUSY);
        cpg->eng_st = ENGINE_W_DEQUEUE;
        wake_up_process(cpg->queue_th);
        return IRQ_HANDLED;
}
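/*
 * Note that the interrupt handler moves no data: it only acknowledges
 * the accelerator-done bit, flips the state machine to ENGINE_W_DEQUEUE
 * and wakes the queue thread, which performs the SRAM-to-scatterlist
 * copy outside of interrupt context.
 */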
struct crypto_alg mv_aes_alg_ecb = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "mv-ecb-aes",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct mv_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = mv_cra_init,
        .cra_u                  = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = mv_setkey_aes,
                        .encrypt        = mv_enc_aes_ecb,
                        .decrypt        = mv_dec_aes_ecb,
                },
        },
};
struct crypto_alg mv_aes_alg_cbc = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "mv-cbc-aes",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct mv_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = mv_cra_init,
        .cra_u                  = {
                .ablkcipher = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = mv_setkey_aes,
                        .encrypt        = mv_enc_aes_cbc,
                        .decrypt        = mv_dec_aes_cbc,
                },
        },
};
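/*
 * Minimal sketch (not part of the driver) of how a kernel-side consumer
 * would reach this engine through the ablkcipher API of this era; the
 * crypto core selects "mv-cbc-aes" for "cbc(aes)" when its priority
 * wins. Error handling is elided and my_done()/example_encrypt() are
 * hypothetical names.
 */
#if 0
static void my_done(struct crypto_async_request *req, int err)
{
        if (err == -EINPROGRESS)
                return;         /* backlog slot freed, keep waiting */
        complete(req->data);    /* caller waits on a struct completion */
}

static int example_encrypt(struct scatterlist *src, struct scatterlist *dst,
                           unsigned int nbytes, u8 *key, u8 *iv)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        int ret;

        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        crypto_ablkcipher_setkey(tfm, key, 16);

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        my_done, &done);
        ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&done);
                ret = 0;
        }

        ablkcipher_request_free(req);
        crypto_free_ablkcipher(tfm);
        return ret;
}
#endif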
static int mv_probe(struct platform_device *pdev)
{
        struct crypto_priv *cp;
        struct resource *res;
        int irq;
        int ret;

        if (cpg) {
                printk(KERN_ERR "Second crypto dev?\n");
                return -EEXIST;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!res)
                return -ENXIO;

        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        spin_lock_init(&cp->lock);
        crypto_init_queue(&cp->queue, 50);
        cp->reg = ioremap(res->start, res->end - res->start + 1);
        if (!cp->reg) {
                ret = -ENOMEM;
                goto err;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
        if (!res) {
                ret = -ENXIO;
                goto err_unmap_reg;
        }
        cp->sram_size = res->end - res->start + 1;
        cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
        cp->sram = ioremap(res->start, cp->sram_size);
        if (!cp->sram) {
                ret = -ENOMEM;
                goto err_unmap_reg;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0 || irq == NO_IRQ) {
                ret = -ENXIO;
                goto err_unmap_sram;
        }
        cp->irq = irq;

        platform_set_drvdata(pdev, cp);
        cpg = cp;

        cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
        if (IS_ERR(cp->queue_th)) {
                ret = PTR_ERR(cp->queue_th);
                goto err_unmap_sram;
        }

        ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
                          cp);
        if (ret)
                goto err_thread;

        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

        ret = crypto_register_alg(&mv_aes_alg_ecb);
        if (ret)
                goto err_irq;

        ret = crypto_register_alg(&mv_aes_alg_cbc);
        if (ret)
                goto err_unreg_ecb;
        return 0;

err_unreg_ecb:
        crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
        free_irq(irq, cp);
err_thread:
        kthread_stop(cp->queue_th);
err_unmap_sram:
        iounmap(cp->sram);
err_unmap_reg:
        iounmap(cp->reg);
err:
        kfree(cp);
        cpg = NULL;
        platform_set_drvdata(pdev, NULL);
        return ret;
}
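/*
 * Sketch (not part of the driver) of the platform device a board file
 * would have to provide: two mem resources named "regs" and "sram" plus
 * one interrupt, matching what mv_probe() looks up. All addresses and
 * the IRQ number below are hypothetical placeholders, not real Orion5X
 * values.
 */
#if 0
static struct resource mv_crypto_resources[] = {
        {
                .name   = "regs",
                .start  = 0xf1030000,           /* hypothetical */
                .end    = 0xf1030fff,
                .flags  = IORESOURCE_MEM,
        }, {
                .name   = "sram",
                .start  = 0xf5000000,           /* hypothetical */
                .end    = 0xf5001fff,
                .flags  = IORESOURCE_MEM,
        }, {
                .start  = 28,                   /* hypothetical IRQ */
                .end    = 28,
                .flags  = IORESOURCE_IRQ,
        },
};

static struct platform_device mv_crypto_device = {
        .name           = "mv_crypto",
        .id             = -1,
        .resource       = mv_crypto_resources,
        .num_resources  = ARRAY_SIZE(mv_crypto_resources),
};
#endif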
static int mv_remove(struct platform_device *pdev)
{
        struct crypto_priv *cp = platform_get_drvdata(pdev);

        crypto_unregister_alg(&mv_aes_alg_ecb);
        crypto_unregister_alg(&mv_aes_alg_cbc);
        kthread_stop(cp->queue_th);
        free_irq(cp->irq, cp);
        /* wipe key material left behind in the shared SRAM */
        memset(cp->sram, 0, cp->sram_size);
        iounmap(cp->sram);
        iounmap(cp->reg);
        kfree(cp);
        cpg = NULL;
        return 0;
}
static struct platform_driver marvell_crypto = {
        .probe          = mv_probe,
        .remove         = mv_remove,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "mv_crypto",
        },
};
MODULE_ALIAS("platform:mv_crypto");
static int __init mv_crypto_init(void)
{
        return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);
static void __exit mv_crypto_exit(void)
{
        platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);
MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");