/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                          ^                |
 *                          |                | more scatter entries
 *                          \________________/
 */
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current crypt process
 * @sg_dst_left:	bytes left dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @total_req_bytes:	total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
51 struct sg_mapping_iter src_sg_it
;
52 struct sg_mapping_iter dst_sg_it
;
68 struct task_struct
*queue_th
;
70 /* the lock protects queue and eng_st */
72 struct crypto_queue queue
;
73 enum engine_status eng_st
;
74 struct ablkcipher_request
*cur_req
;
75 struct req_progress p
;
/* Single global engine instance (driver supports one device only). */
static struct crypto_priv *cpg;
83 u8 aes_enc_key
[AES_KEY_LEN
];
86 u32 need_calc_aes_dkey
;
99 static void compute_aes_dec_key(struct mv_ctx
*ctx
)
101 struct crypto_aes_ctx gen_aes_key
;
104 if (!ctx
->need_calc_aes_dkey
)
107 crypto_aes_expand_key(&gen_aes_key
, ctx
->aes_enc_key
, ctx
->key_len
);
109 key_pos
= ctx
->key_len
+ 24;
110 memcpy(ctx
->aes_dec_key
, &gen_aes_key
.key_enc
[key_pos
], 4 * 4);
111 switch (ctx
->key_len
) {
112 case AES_KEYSIZE_256
:
115 case AES_KEYSIZE_192
:
117 memcpy(&ctx
->aes_dec_key
[4], &gen_aes_key
.key_enc
[key_pos
],
121 ctx
->need_calc_aes_dkey
= 0;
124 static int mv_setkey_aes(struct crypto_ablkcipher
*cipher
, const u8
*key
,
127 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(cipher
);
128 struct mv_ctx
*ctx
= crypto_tfm_ctx(tfm
);
131 case AES_KEYSIZE_128
:
132 case AES_KEYSIZE_192
:
133 case AES_KEYSIZE_256
:
136 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
140 ctx
->need_calc_aes_dkey
= 1;
142 memcpy(ctx
->aes_enc_key
, key
, AES_KEY_LEN
);
146 static void setup_data_in(struct ablkcipher_request
*req
)
151 if (!cpg
->p
.sg_src_left
) {
152 ret
= sg_miter_next(&cpg
->p
.src_sg_it
);
154 cpg
->p
.sg_src_left
= cpg
->p
.src_sg_it
.length
;
155 cpg
->p
.src_start
= 0;
158 cpg
->p
.crypt_len
= min(cpg
->p
.sg_src_left
, cpg
->max_req_size
);
160 buf
= cpg
->p
.src_sg_it
.addr
;
161 buf
+= cpg
->p
.src_start
;
163 memcpy(cpg
->sram
+ SRAM_DATA_IN_START
, buf
, cpg
->p
.crypt_len
);
165 cpg
->p
.sg_src_left
-= cpg
->p
.crypt_len
;
166 cpg
->p
.src_start
+= cpg
->p
.crypt_len
;
169 static void mv_process_current_q(int first_block
)
171 struct ablkcipher_request
*req
= cpg
->cur_req
;
172 struct mv_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
173 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
174 struct sec_accel_config op
;
176 switch (req_ctx
->op
) {
178 op
.config
= CFG_OP_CRYPT_ONLY
| CFG_ENCM_AES
| CFG_ENC_MODE_ECB
;
182 op
.config
= CFG_OP_CRYPT_ONLY
| CFG_ENCM_AES
| CFG_ENC_MODE_CBC
;
183 op
.enc_iv
= ENC_IV_POINT(SRAM_DATA_IV
) |
184 ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF
);
186 memcpy(cpg
->sram
+ SRAM_DATA_IV
, req
->info
, 16);
189 if (req_ctx
->decrypt
) {
190 op
.config
|= CFG_DIR_DEC
;
191 memcpy(cpg
->sram
+ SRAM_DATA_KEY_P
, ctx
->aes_dec_key
,
194 op
.config
|= CFG_DIR_ENC
;
195 memcpy(cpg
->sram
+ SRAM_DATA_KEY_P
, ctx
->aes_enc_key
,
199 switch (ctx
->key_len
) {
200 case AES_KEYSIZE_128
:
201 op
.config
|= CFG_AES_LEN_128
;
203 case AES_KEYSIZE_192
:
204 op
.config
|= CFG_AES_LEN_192
;
206 case AES_KEYSIZE_256
:
207 op
.config
|= CFG_AES_LEN_256
;
210 op
.enc_p
= ENC_P_SRC(SRAM_DATA_IN_START
) |
211 ENC_P_DST(SRAM_DATA_OUT_START
);
212 op
.enc_key_p
= SRAM_DATA_KEY_P
;
215 op
.enc_len
= cpg
->p
.crypt_len
;
216 memcpy(cpg
->sram
+ SRAM_CONFIG
, &op
,
217 sizeof(struct sec_accel_config
));
219 writel(SRAM_CONFIG
, cpg
->reg
+ SEC_ACCEL_DESC_P0
);
221 writel(SEC_CMD_EN_SEC_ACCL0
, cpg
->reg
+ SEC_ACCEL_CMD
);
224 * XXX: add timer if the interrupt does not occur for some mystery
229 static void mv_crypto_algo_completion(void)
231 struct ablkcipher_request
*req
= cpg
->cur_req
;
232 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
234 if (req_ctx
->op
!= COP_AES_CBC
)
237 memcpy(req
->info
, cpg
->sram
+ SRAM_DATA_IV_BUF
, 16);
240 static void dequeue_complete_req(void)
242 struct ablkcipher_request
*req
= cpg
->cur_req
;
246 cpg
->p
.total_req_bytes
+= cpg
->p
.crypt_len
;
250 if (!cpg
->p
.sg_dst_left
) {
251 ret
= sg_miter_next(&cpg
->p
.dst_sg_it
);
253 cpg
->p
.sg_dst_left
= cpg
->p
.dst_sg_it
.length
;
254 cpg
->p
.dst_start
= 0;
257 buf
= cpg
->p
.dst_sg_it
.addr
;
258 buf
+= cpg
->p
.dst_start
;
260 dst_copy
= min(cpg
->p
.crypt_len
, cpg
->p
.sg_dst_left
);
262 memcpy(buf
, cpg
->sram
+ SRAM_DATA_OUT_START
, dst_copy
);
264 cpg
->p
.sg_dst_left
-= dst_copy
;
265 cpg
->p
.crypt_len
-= dst_copy
;
266 cpg
->p
.dst_start
+= dst_copy
;
267 } while (cpg
->p
.crypt_len
> 0);
269 BUG_ON(cpg
->eng_st
!= ENGINE_W_DEQUEUE
);
270 if (cpg
->p
.total_req_bytes
< req
->nbytes
) {
271 /* process next scatter list entry */
272 cpg
->eng_st
= ENGINE_BUSY
;
273 mv_process_current_q(0);
275 sg_miter_stop(&cpg
->p
.src_sg_it
);
276 sg_miter_stop(&cpg
->p
.dst_sg_it
);
277 mv_crypto_algo_completion();
278 cpg
->eng_st
= ENGINE_IDLE
;
280 req
->base
.complete(&req
->base
, 0);
285 static int count_sgs(struct scatterlist
*sl
, unsigned int total_bytes
)
290 total_bytes
-= sl
[i
].length
;
293 } while (total_bytes
> 0);
298 static void mv_enqueue_new_req(struct ablkcipher_request
*req
)
303 memset(&cpg
->p
, 0, sizeof(struct req_progress
));
305 num_sgs
= count_sgs(req
->src
, req
->nbytes
);
306 sg_miter_start(&cpg
->p
.src_sg_it
, req
->src
, num_sgs
, SG_MITER_FROM_SG
);
308 num_sgs
= count_sgs(req
->dst
, req
->nbytes
);
309 sg_miter_start(&cpg
->p
.dst_sg_it
, req
->dst
, num_sgs
, SG_MITER_TO_SG
);
310 mv_process_current_q(1);
313 static int queue_manag(void *data
)
315 cpg
->eng_st
= ENGINE_IDLE
;
317 struct ablkcipher_request
*req
;
318 struct crypto_async_request
*async_req
= NULL
;
319 struct crypto_async_request
*backlog
;
321 __set_current_state(TASK_INTERRUPTIBLE
);
323 if (cpg
->eng_st
== ENGINE_W_DEQUEUE
)
324 dequeue_complete_req();
326 spin_lock_irq(&cpg
->lock
);
327 if (cpg
->eng_st
== ENGINE_IDLE
) {
328 backlog
= crypto_get_backlog(&cpg
->queue
);
329 async_req
= crypto_dequeue_request(&cpg
->queue
);
331 BUG_ON(cpg
->eng_st
!= ENGINE_IDLE
);
332 cpg
->eng_st
= ENGINE_BUSY
;
335 spin_unlock_irq(&cpg
->lock
);
338 backlog
->complete(backlog
, -EINPROGRESS
);
343 req
= container_of(async_req
,
344 struct ablkcipher_request
, base
);
345 mv_enqueue_new_req(req
);
351 } while (!kthread_should_stop());
355 static int mv_handle_req(struct ablkcipher_request
*req
)
360 spin_lock_irqsave(&cpg
->lock
, flags
);
361 ret
= ablkcipher_enqueue_request(&cpg
->queue
, req
);
362 spin_unlock_irqrestore(&cpg
->lock
, flags
);
363 wake_up_process(cpg
->queue_th
);
367 static int mv_enc_aes_ecb(struct ablkcipher_request
*req
)
369 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
371 req_ctx
->op
= COP_AES_ECB
;
372 req_ctx
->decrypt
= 0;
374 return mv_handle_req(req
);
377 static int mv_dec_aes_ecb(struct ablkcipher_request
*req
)
379 struct mv_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
380 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
382 req_ctx
->op
= COP_AES_ECB
;
383 req_ctx
->decrypt
= 1;
385 compute_aes_dec_key(ctx
);
386 return mv_handle_req(req
);
389 static int mv_enc_aes_cbc(struct ablkcipher_request
*req
)
391 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
393 req_ctx
->op
= COP_AES_CBC
;
394 req_ctx
->decrypt
= 0;
396 return mv_handle_req(req
);
399 static int mv_dec_aes_cbc(struct ablkcipher_request
*req
)
401 struct mv_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
402 struct mv_req_ctx
*req_ctx
= ablkcipher_request_ctx(req
);
404 req_ctx
->op
= COP_AES_CBC
;
405 req_ctx
->decrypt
= 1;
407 compute_aes_dec_key(ctx
);
408 return mv_handle_req(req
);
411 static int mv_cra_init(struct crypto_tfm
*tfm
)
413 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct mv_req_ctx
);
417 irqreturn_t
crypto_int(int irq
, void *priv
)
421 val
= readl(cpg
->reg
+ SEC_ACCEL_INT_STATUS
);
422 if (!(val
& SEC_INT_ACCEL0_DONE
))
425 val
&= ~SEC_INT_ACCEL0_DONE
;
426 writel(val
, cpg
->reg
+ FPGA_INT_STATUS
);
427 writel(val
, cpg
->reg
+ SEC_ACCEL_INT_STATUS
);
428 BUG_ON(cpg
->eng_st
!= ENGINE_BUSY
);
429 cpg
->eng_st
= ENGINE_W_DEQUEUE
;
430 wake_up_process(cpg
->queue_th
);
434 struct crypto_alg mv_aes_alg_ecb
= {
435 .cra_name
= "ecb(aes)",
436 .cra_driver_name
= "mv-ecb-aes",
438 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
440 .cra_ctxsize
= sizeof(struct mv_ctx
),
442 .cra_type
= &crypto_ablkcipher_type
,
443 .cra_module
= THIS_MODULE
,
444 .cra_init
= mv_cra_init
,
447 .min_keysize
= AES_MIN_KEY_SIZE
,
448 .max_keysize
= AES_MAX_KEY_SIZE
,
449 .setkey
= mv_setkey_aes
,
450 .encrypt
= mv_enc_aes_ecb
,
451 .decrypt
= mv_dec_aes_ecb
,
456 struct crypto_alg mv_aes_alg_cbc
= {
457 .cra_name
= "cbc(aes)",
458 .cra_driver_name
= "mv-cbc-aes",
460 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
461 .cra_blocksize
= AES_BLOCK_SIZE
,
462 .cra_ctxsize
= sizeof(struct mv_ctx
),
464 .cra_type
= &crypto_ablkcipher_type
,
465 .cra_module
= THIS_MODULE
,
466 .cra_init
= mv_cra_init
,
469 .ivsize
= AES_BLOCK_SIZE
,
470 .min_keysize
= AES_MIN_KEY_SIZE
,
471 .max_keysize
= AES_MAX_KEY_SIZE
,
472 .setkey
= mv_setkey_aes
,
473 .encrypt
= mv_enc_aes_cbc
,
474 .decrypt
= mv_dec_aes_cbc
,
479 static int mv_probe(struct platform_device
*pdev
)
481 struct crypto_priv
*cp
;
482 struct resource
*res
;
487 printk(KERN_ERR
"Second crypto dev?\n");
491 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "regs");
495 cp
= kzalloc(sizeof(*cp
), GFP_KERNEL
);
499 spin_lock_init(&cp
->lock
);
500 crypto_init_queue(&cp
->queue
, 50);
501 cp
->reg
= ioremap(res
->start
, res
->end
- res
->start
+ 1);
507 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "sram");
512 cp
->sram_size
= res
->end
- res
->start
+ 1;
513 cp
->max_req_size
= cp
->sram_size
- SRAM_CFG_SPACE
;
514 cp
->sram
= ioremap(res
->start
, cp
->sram_size
);
520 irq
= platform_get_irq(pdev
, 0);
521 if (irq
< 0 || irq
== NO_IRQ
) {
527 platform_set_drvdata(pdev
, cp
);
530 cp
->queue_th
= kthread_run(queue_manag
, cp
, "mv_crypto");
531 if (IS_ERR(cp
->queue_th
)) {
532 ret
= PTR_ERR(cp
->queue_th
);
536 ret
= request_irq(irq
, crypto_int
, IRQF_DISABLED
, dev_name(&pdev
->dev
),
541 writel(SEC_INT_ACCEL0_DONE
, cpg
->reg
+ SEC_ACCEL_INT_MASK
);
542 writel(SEC_CFG_STOP_DIG_ERR
, cpg
->reg
+ SEC_ACCEL_CFG
);
544 ret
= crypto_register_alg(&mv_aes_alg_ecb
);
548 ret
= crypto_register_alg(&mv_aes_alg_cbc
);
553 crypto_unregister_alg(&mv_aes_alg_ecb
);
557 kthread_stop(cp
->queue_th
);
565 platform_set_drvdata(pdev
, NULL
);
569 static int mv_remove(struct platform_device
*pdev
)
571 struct crypto_priv
*cp
= platform_get_drvdata(pdev
);
573 crypto_unregister_alg(&mv_aes_alg_ecb
);
574 crypto_unregister_alg(&mv_aes_alg_cbc
);
575 kthread_stop(cp
->queue_th
);
576 free_irq(cp
->irq
, cp
);
577 memset(cp
->sram
, 0, cp
->sram_size
);
585 static struct platform_driver marvell_crypto
= {
589 .owner
= THIS_MODULE
,
593 MODULE_ALIAS("platform:mv_crypto");
595 static int __init
mv_crypto_init(void)
597 return platform_driver_register(&marvell_crypto
);
599 module_init(mv_crypto_init
);
601 static void __exit
mv_crypto_exit(void)
603 platform_driver_unregister(&marvell_crypto
);
605 module_exit(mv_crypto_exit
);
607 MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
608 MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
609 MODULE_LICENSE("GPL");