2 * Support for Marvell's crypto engine which can be found on some Orion5X
5 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
9 #include <crypto/aes.h>
10 #include <crypto/algapi.h>
11 #include <linux/crypto.h>
12 #include <linux/interrupt.h>
14 #include <linux/kthread.h>
15 #include <linux/platform_device.h>
16 #include <linux/scatterlist.h>
21 * /---------------------------------------\
22 * | | request complete
24 * IDLE -> new request -> BUSY -> done -> DEQUEUE
26 * | | more scatter entries
36 * struct req_progress - used for every crypt request
37 * @src_sg_it: sg iterator for src
38 * @dst_sg_it: sg iterator for dst
39 * @sg_src_left: bytes left in src to process (scatter list)
40 * @src_start: offset to add to src start position (scatter list)
41 * @crypt_len: length of current crypt process
42 * @sg_dst_left: bytes left dst to process in this scatter list
43 * @dst_start: offset to add to dst start position (scatter list)
44 * @total_req_bytes: total number of bytes processed (request).
46 * sg helpers are used to iterate over the scatterlist. Since the size of the
47 * SRAM may be less than the scatter size, this struct is used to keep
48 * track of progress within current scatterlist.
51 struct sg_mapping_iter src_sg_it
;
52 struct sg_mapping_iter dst_sg_it
;
68 struct task_struct
*queue_th
;
70 /* the lock protects queue and eng_st */
72 struct crypto_queue queue
;
73 enum engine_status eng_st
;
74 struct ablkcipher_request
*cur_req
;
75 struct req_progress p
;
/* Single global device instance; mv_probe() rejects a second device. */
static struct crypto_priv *cpg;
83 u8 aes_enc_key
[AES_KEY_LEN
];
86 u32 need_calc_aes_dkey
;
99 static void compute_aes_dec_key(struct mv_ctx
*ctx
)
101 struct crypto_aes_ctx gen_aes_key
;
104 if (!ctx
->need_calc_aes_dkey
)
107 crypto_aes_expand_key(&gen_aes_key
, ctx
->aes_enc_key
, ctx
->key_len
);
109 key_pos
= ctx
->key_len
+ 24;
110 memcpy(ctx
->aes_dec_key
, &gen_aes_key
.key_enc
[key_pos
], 4 * 4);
111 switch (ctx
->key_len
) {
112 case AES_KEYSIZE_256
:
115 case AES_KEYSIZE_192
:
117 memcpy(&ctx
->aes_dec_key
[4], &gen_aes_key
.key_enc
[key_pos
],
121 ctx
->need_calc_aes_dkey
= 0;
124 static int mv_setkey_aes(struct crypto_ablkcipher
*cipher
, const u8
*key
,
127 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(cipher
);
128 struct mv_ctx
*ctx
= crypto_tfm_ctx(tfm
);
131 case AES_KEYSIZE_128
:
132 case AES_KEYSIZE_192
:
133 case AES_KEYSIZE_256
:
136 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
140 ctx
->need_calc_aes_dkey
= 1;
142 memcpy(ctx
->aes_enc_key
, key
, AES_KEY_LEN
);
146 static void copy_src_to_buf(struct req_progress
*p
, char *dbuf
, int len
)
153 if (!p
->sg_src_left
) {
154 ret
= sg_miter_next(&p
->src_sg_it
);
156 p
->sg_src_left
= p
->src_sg_it
.length
;
160 sbuf
= p
->src_sg_it
.addr
+ p
->src_start
;
162 if (p
->sg_src_left
<= len
- copied
) {
163 memcpy(dbuf
+ copied
, sbuf
, p
->sg_src_left
);
164 copied
+= p
->sg_src_left
;
169 int copy_len
= len
- copied
;
170 memcpy(dbuf
+ copied
, sbuf
, copy_len
);
171 p
->src_start
+= copy_len
;
172 p
->sg_src_left
-= copy_len
;
178 static void setup_data_in(struct ablkcipher_request
*req
)
180 struct req_progress
*p
= &cpg
->p
;
182 min((int)req
->nbytes
- p
->total_req_bytes
, cpg
->max_req_size
);
183 copy_src_to_buf(p
, cpg
->sram
+ SRAM_DATA_IN_START
,
187 static void mv_process_current_q(int first_block
)
189 struct ablkcipher_request
*req
= cpg
->cur_req
;
190 struct mv_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
191 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
192 struct sec_accel_config op
;
194 switch (req_ctx
->op
) {
196 op
.config
= CFG_OP_CRYPT_ONLY
| CFG_ENCM_AES
| CFG_ENC_MODE_ECB
;
200 op
.config
= CFG_OP_CRYPT_ONLY
| CFG_ENCM_AES
| CFG_ENC_MODE_CBC
;
201 op
.enc_iv
= ENC_IV_POINT(SRAM_DATA_IV
) |
202 ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF
);
204 memcpy(cpg
->sram
+ SRAM_DATA_IV
, req
->info
, 16);
207 if (req_ctx
->decrypt
) {
208 op
.config
|= CFG_DIR_DEC
;
209 memcpy(cpg
->sram
+ SRAM_DATA_KEY_P
, ctx
->aes_dec_key
,
212 op
.config
|= CFG_DIR_ENC
;
213 memcpy(cpg
->sram
+ SRAM_DATA_KEY_P
, ctx
->aes_enc_key
,
217 switch (ctx
->key_len
) {
218 case AES_KEYSIZE_128
:
219 op
.config
|= CFG_AES_LEN_128
;
221 case AES_KEYSIZE_192
:
222 op
.config
|= CFG_AES_LEN_192
;
224 case AES_KEYSIZE_256
:
225 op
.config
|= CFG_AES_LEN_256
;
228 op
.enc_p
= ENC_P_SRC(SRAM_DATA_IN_START
) |
229 ENC_P_DST(SRAM_DATA_OUT_START
);
230 op
.enc_key_p
= SRAM_DATA_KEY_P
;
233 op
.enc_len
= cpg
->p
.crypt_len
;
234 memcpy(cpg
->sram
+ SRAM_CONFIG
, &op
,
235 sizeof(struct sec_accel_config
));
237 writel(SRAM_CONFIG
, cpg
->reg
+ SEC_ACCEL_DESC_P0
);
239 writel(SEC_CMD_EN_SEC_ACCL0
, cpg
->reg
+ SEC_ACCEL_CMD
);
242 * XXX: add timer if the interrupt does not occur for some mystery
247 static void mv_crypto_algo_completion(void)
249 struct ablkcipher_request
*req
= cpg
->cur_req
;
250 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
252 if (req_ctx
->op
!= COP_AES_CBC
)
255 memcpy(req
->info
, cpg
->sram
+ SRAM_DATA_IV_BUF
, 16);
258 static void dequeue_complete_req(void)
260 struct ablkcipher_request
*req
= cpg
->cur_req
;
263 int need_copy_len
= cpg
->p
.crypt_len
;
266 cpg
->p
.total_req_bytes
+= cpg
->p
.crypt_len
;
270 if (!cpg
->p
.sg_dst_left
) {
271 ret
= sg_miter_next(&cpg
->p
.dst_sg_it
);
273 cpg
->p
.sg_dst_left
= cpg
->p
.dst_sg_it
.length
;
274 cpg
->p
.dst_start
= 0;
277 buf
= cpg
->p
.dst_sg_it
.addr
;
278 buf
+= cpg
->p
.dst_start
;
280 dst_copy
= min(need_copy_len
, cpg
->p
.sg_dst_left
);
283 cpg
->sram
+ SRAM_DATA_OUT_START
+ sram_offset
,
285 sram_offset
+= dst_copy
;
286 cpg
->p
.sg_dst_left
-= dst_copy
;
287 need_copy_len
-= dst_copy
;
288 cpg
->p
.dst_start
+= dst_copy
;
289 } while (need_copy_len
> 0);
291 BUG_ON(cpg
->eng_st
!= ENGINE_W_DEQUEUE
);
292 if (cpg
->p
.total_req_bytes
< req
->nbytes
) {
293 /* process next scatter list entry */
294 cpg
->eng_st
= ENGINE_BUSY
;
295 mv_process_current_q(0);
297 sg_miter_stop(&cpg
->p
.src_sg_it
);
298 sg_miter_stop(&cpg
->p
.dst_sg_it
);
299 mv_crypto_algo_completion();
300 cpg
->eng_st
= ENGINE_IDLE
;
302 req
->base
.complete(&req
->base
, 0);
307 static int count_sgs(struct scatterlist
*sl
, unsigned int total_bytes
)
313 cur_len
= sl
[i
].length
;
315 if (total_bytes
> cur_len
)
316 total_bytes
-= cur_len
;
324 static void mv_enqueue_new_req(struct ablkcipher_request
*req
)
329 memset(&cpg
->p
, 0, sizeof(struct req_progress
));
331 num_sgs
= count_sgs(req
->src
, req
->nbytes
);
332 sg_miter_start(&cpg
->p
.src_sg_it
, req
->src
, num_sgs
, SG_MITER_FROM_SG
);
334 num_sgs
= count_sgs(req
->dst
, req
->nbytes
);
335 sg_miter_start(&cpg
->p
.dst_sg_it
, req
->dst
, num_sgs
, SG_MITER_TO_SG
);
336 mv_process_current_q(1);
339 static int queue_manag(void *data
)
341 cpg
->eng_st
= ENGINE_IDLE
;
343 struct ablkcipher_request
*req
;
344 struct crypto_async_request
*async_req
= NULL
;
345 struct crypto_async_request
*backlog
;
347 __set_current_state(TASK_INTERRUPTIBLE
);
349 if (cpg
->eng_st
== ENGINE_W_DEQUEUE
)
350 dequeue_complete_req();
352 spin_lock_irq(&cpg
->lock
);
353 if (cpg
->eng_st
== ENGINE_IDLE
) {
354 backlog
= crypto_get_backlog(&cpg
->queue
);
355 async_req
= crypto_dequeue_request(&cpg
->queue
);
357 BUG_ON(cpg
->eng_st
!= ENGINE_IDLE
);
358 cpg
->eng_st
= ENGINE_BUSY
;
361 spin_unlock_irq(&cpg
->lock
);
364 backlog
->complete(backlog
, -EINPROGRESS
);
369 req
= container_of(async_req
,
370 struct ablkcipher_request
, base
);
371 mv_enqueue_new_req(req
);
377 } while (!kthread_should_stop());
381 static int mv_handle_req(struct ablkcipher_request
*req
)
386 spin_lock_irqsave(&cpg
->lock
, flags
);
387 ret
= ablkcipher_enqueue_request(&cpg
->queue
, req
);
388 spin_unlock_irqrestore(&cpg
->lock
, flags
);
389 wake_up_process(cpg
->queue_th
);
393 static int mv_enc_aes_ecb(struct ablkcipher_request
*req
)
395 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
397 req_ctx
->op
= COP_AES_ECB
;
398 req_ctx
->decrypt
= 0;
400 return mv_handle_req(req
);
403 static int mv_dec_aes_ecb(struct ablkcipher_request
*req
)
405 struct mv_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
406 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
408 req_ctx
->op
= COP_AES_ECB
;
409 req_ctx
->decrypt
= 1;
411 compute_aes_dec_key(ctx
);
412 return mv_handle_req(req
);
415 static int mv_enc_aes_cbc(struct ablkcipher_request
*req
)
417 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
419 req_ctx
->op
= COP_AES_CBC
;
420 req_ctx
->decrypt
= 0;
422 return mv_handle_req(req
);
425 static int mv_dec_aes_cbc(struct ablkcipher_request
*req
)
427 struct mv_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
428 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
430 req_ctx
->op
= COP_AES_CBC
;
431 req_ctx
->decrypt
= 1;
433 compute_aes_dec_key(ctx
);
434 return mv_handle_req(req
);
437 static int mv_cra_init(struct crypto_tfm
*tfm
)
439 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct mv_req_ctx
);
443 irqreturn_t
crypto_int(int irq
, void *priv
)
447 val
= readl(cpg
->reg
+ SEC_ACCEL_INT_STATUS
);
448 if (!(val
& SEC_INT_ACCEL0_DONE
))
451 val
&= ~SEC_INT_ACCEL0_DONE
;
452 writel(val
, cpg
->reg
+ FPGA_INT_STATUS
);
453 writel(val
, cpg
->reg
+ SEC_ACCEL_INT_STATUS
);
454 BUG_ON(cpg
->eng_st
!= ENGINE_BUSY
);
455 cpg
->eng_st
= ENGINE_W_DEQUEUE
;
456 wake_up_process(cpg
->queue_th
);
460 struct crypto_alg mv_aes_alg_ecb
= {
461 .cra_name
= "ecb(aes)",
462 .cra_driver_name
= "mv-ecb-aes",
464 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
466 .cra_ctxsize
= sizeof(struct mv_ctx
),
468 .cra_type
= &crypto_ablkcipher_type
,
469 .cra_module
= THIS_MODULE
,
470 .cra_init
= mv_cra_init
,
473 .min_keysize
= AES_MIN_KEY_SIZE
,
474 .max_keysize
= AES_MAX_KEY_SIZE
,
475 .setkey
= mv_setkey_aes
,
476 .encrypt
= mv_enc_aes_ecb
,
477 .decrypt
= mv_dec_aes_ecb
,
482 struct crypto_alg mv_aes_alg_cbc
= {
483 .cra_name
= "cbc(aes)",
484 .cra_driver_name
= "mv-cbc-aes",
486 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
487 .cra_blocksize
= AES_BLOCK_SIZE
,
488 .cra_ctxsize
= sizeof(struct mv_ctx
),
490 .cra_type
= &crypto_ablkcipher_type
,
491 .cra_module
= THIS_MODULE
,
492 .cra_init
= mv_cra_init
,
495 .ivsize
= AES_BLOCK_SIZE
,
496 .min_keysize
= AES_MIN_KEY_SIZE
,
497 .max_keysize
= AES_MAX_KEY_SIZE
,
498 .setkey
= mv_setkey_aes
,
499 .encrypt
= mv_enc_aes_cbc
,
500 .decrypt
= mv_dec_aes_cbc
,
505 static int mv_probe(struct platform_device
*pdev
)
507 struct crypto_priv
*cp
;
508 struct resource
*res
;
513 printk(KERN_ERR
"Second crypto dev?\n");
517 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "regs");
521 cp
= kzalloc(sizeof(*cp
), GFP_KERNEL
);
525 spin_lock_init(&cp
->lock
);
526 crypto_init_queue(&cp
->queue
, 50);
527 cp
->reg
= ioremap(res
->start
, res
->end
- res
->start
+ 1);
533 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "sram");
538 cp
->sram_size
= res
->end
- res
->start
+ 1;
539 cp
->max_req_size
= cp
->sram_size
- SRAM_CFG_SPACE
;
540 cp
->sram
= ioremap(res
->start
, cp
->sram_size
);
546 irq
= platform_get_irq(pdev
, 0);
547 if (irq
< 0 || irq
== NO_IRQ
) {
553 platform_set_drvdata(pdev
, cp
);
556 cp
->queue_th
= kthread_run(queue_manag
, cp
, "mv_crypto");
557 if (IS_ERR(cp
->queue_th
)) {
558 ret
= PTR_ERR(cp
->queue_th
);
562 ret
= request_irq(irq
, crypto_int
, IRQF_DISABLED
, dev_name(&pdev
->dev
),
567 writel(SEC_INT_ACCEL0_DONE
, cpg
->reg
+ SEC_ACCEL_INT_MASK
);
568 writel(SEC_CFG_STOP_DIG_ERR
, cpg
->reg
+ SEC_ACCEL_CFG
);
570 ret
= crypto_register_alg(&mv_aes_alg_ecb
);
574 ret
= crypto_register_alg(&mv_aes_alg_cbc
);
579 crypto_unregister_alg(&mv_aes_alg_ecb
);
583 kthread_stop(cp
->queue_th
);
591 platform_set_drvdata(pdev
, NULL
);
595 static int mv_remove(struct platform_device
*pdev
)
597 struct crypto_priv
*cp
= platform_get_drvdata(pdev
);
599 crypto_unregister_alg(&mv_aes_alg_ecb
);
600 crypto_unregister_alg(&mv_aes_alg_cbc
);
601 kthread_stop(cp
->queue_th
);
602 free_irq(cp
->irq
, cp
);
603 memset(cp
->sram
, 0, cp
->sram_size
);
611 static struct platform_driver marvell_crypto
= {
615 .owner
= THIS_MODULE
,
619 MODULE_ALIAS("platform:mv_crypto");
621 static int __init
mv_crypto_init(void)
623 return platform_driver_register(&marvell_crypto
);
625 module_init(mv_crypto_init
);
627 static void __exit
mv_crypto_exit(void)
629 platform_driver_unregister(&marvell_crypto
);
631 module_exit(mv_crypto_exit
);
633 MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
634 MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
635 MODULE_LICENSE("GPL");