/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                          |                | more scatter entries
 *                          \________________/
 */
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current crypt process
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @total_req_bytes:	total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	/* dst mostly */
	int sg_dst_left;
	int dst_start;
	int total_req_bytes;
};
struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct ablkcipher_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;
struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};
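/*
 * Derive the key needed for decryption: the engine expects the final round
 * keys of the expanded encryption key schedule. This is done lazily, only
 * while need_calc_aes_dkey is set (i.e. after a new key has been programmed).
 */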
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}
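/*
 * Copy the next chunk of source data into the engine's SRAM input buffer,
 * advancing the source sg iterator as needed. At most max_req_size bytes are
 * staged per pass; crypt_len records how much was copied.
 */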
static void setup_data_in(struct ablkcipher_request *req)
{
	int ret;
	void *buf;

	if (!cpg->p.sg_src_left) {
		ret = sg_miter_next(&cpg->p.src_sg_it);
		BUG_ON(!ret);
		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
		cpg->p.src_start = 0;
	}

	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);

	buf = cpg->p.src_sg_it.addr;
	buf += cpg->p.src_start;

	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);

	cpg->p.sg_src_left -= cpg->p.crypt_len;
	cpg->p.src_start += cpg->p.crypt_len;
}
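/*
 * Program the accelerator for the current chunk: build the sec_accel_config
 * descriptor in SRAM (cipher mode, direction, key and, for CBC on the first
 * block, the IV), stage the input data and start the operation.
 */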
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in(req);
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
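/*
 * For CBC, hand the IV produced by the engine back to the caller so that
 * chained requests can continue from it.
 */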
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
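/*
 * Copy the processed data from SRAM back into the destination scatterlist and
 * either kick off the next chunk or complete the request.
 */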
static void dequeue_complete_req(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.total_req_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);

		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);

		cpg->p.sg_dst_left -= dst_copy;
		cpg->p.crypt_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (cpg->p.crypt_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.total_req_bytes < req->nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		mv_process_current_q(0);
	} else {
		sg_miter_stop(&cpg->p.src_sg_it);
		sg_miter_stop(&cpg->p.dst_sg_it);
		mv_crypto_algo_completion();
		cpg->eng_st = ENGINE_IDLE;
		req->base.complete(&req->base, 0);
	}
}
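/* Count how many scatterlist entries are needed to cover total_bytes. */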
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;

	do {
		total_bytes -= sl[i].length;
		i++;
	} while (total_bytes > 0);

	return i;
}
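/*
 * Start processing a new request: reset the progress state, set up the
 * source and destination sg iterators and process the first chunk.
 */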
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	int num_sgs;

	cpg->cur_req = req;
	memset(&cpg->p, 0, sizeof(struct req_progress));

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
	mv_process_current_q(1);
}
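/*
 * Kernel thread that feeds the engine: it dequeues completed chunks, pulls
 * the next request off the crypto queue when the engine is idle, and sleeps
 * until woken by the interrupt handler or by mv_handle_req().
 */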
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}
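/* Queue a request for the engine and wake the queue thread. */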
static int mv_handle_req(struct ablkcipher_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = ablkcipher_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);

	return ret;
}
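/*
 * The four ablkcipher entry points only record the requested mode and
 * direction in the per-request context and queue the request; the decryption
 * paths additionally make sure the decryption key has been derived.
 */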
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}
static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}
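/*
 * Interrupt handler: acknowledge the "accelerator 0 done" interrupt and let
 * the queue thread dequeue the completed data.
 */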
irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}
struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_ctxsize		= sizeof(struct mv_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= mv_cra_init,
	.cra_u			= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};
struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mv_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= mv_cra_init,
	.cra_u			= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};
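/*
 * Probe: map the register and SRAM windows, start the queue thread, install
 * the interrupt handler and register the ECB/CBC AES algorithms.
 */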
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}
static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}
static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");
static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);
MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");