/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>
#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9
#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

/* key length in 32-bit words, as the NPE config word encodes it */
#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400
#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)
#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30
#define CTL_FLAG_UNUSED       0x0000
#define CTL_FLAG_USED         0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV      0x0002
#define CTL_FLAG_GEN_REVAES   0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK         0x000f
#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16
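/* HMAC_IPAD_VALUE and HMAC_OPAD_VALUE above are the standard RFC 2104
 * inner/outer pad bytes. MD5 and SHA-1 share a 64 byte message block,
 * so HMAC_PAD_BLOCKLEN (SHA1_BLOCK_SIZE) serves both hash algorithms. */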
struct buffer_desc {
    u32 phys_next;
    u16 buf_len;
    u16 pkt_len;
    u32 phys_addr;
    u32 __reserved[4];
    struct buffer_desc *next;
    enum dma_data_direction dir;
};

struct crypt_ctl {
    u8 mode;        /* NPE_OP_* operation mode */
    u8 init_len;
    u16 reserved;
    u8 iv[MAX_IVLEN];   /* IV for CBC mode or CTR IV for CTR mode */
    u32 icv_rev_aes;    /* icv or rev aes */
    u32 src_buf;
    u32 dst_buf;
    u16 auth_offs;      /* Authentication start offset */
    u16 auth_len;       /* Authentication data length */
    u16 crypt_offs;     /* Cryption start offset */
    u16 crypt_len;      /* Cryption data length */
    u32 aadAddr;        /* Additional Auth Data Addr for CCM mode */
    u32 crypto_ctx;     /* NPE Crypto Param structure address */

    /* Used by Host: 4*4 bytes */
    unsigned ctl_flags;
    union {
        struct ablkcipher_request *ablk_req;
        struct aead_request *aead_req;
        struct crypto_tfm *tfm;
    } data;
    struct buffer_desc *regist_buf;
    u8 *regist_ptr;
};

struct ablk_ctx {
    struct buffer_desc *src;
    struct buffer_desc *dst;
};

struct aead_ctx {
    struct buffer_desc *buffer;
    struct scatterlist ivlist;
    /* used when the hmac is not on one sg entry */
    u8 *hmac_virt;
    int encrypt;
};
struct ix_hash_algo {
    u32 cfgword;
    unsigned char *icv;
};

struct ix_sa_dir {
    unsigned char *npe_ctx;
    dma_addr_t npe_ctx_phys;
    int npe_ctx_idx;
    u8 npe_mode;
};
struct ixp_ctx {
    struct ix_sa_dir encrypt;
    struct ix_sa_dir decrypt;
    int authkey_len;
    u8 authkey[MAX_KEYLEN];
    int enckey_len;
    u8 enckey[MAX_KEYLEN];
    u8 salt[MAX_IVLEN];
    u8 nonce[CTR_RFC3686_NONCE_SIZE];
    unsigned salted;
    atomic_t configuring;
    struct completion completion;
};
struct ixp_alg {
    struct crypto_alg crypto;
    const struct ix_hash_algo *hash;
    u32 cfg_enc;
    u32 cfg_dec;

    int registered;
};
static const struct ix_hash_algo hash_alg_md5 = {
    .cfgword    = 0xAA010004,
    .icv        = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
                  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
    .cfgword    = 0x00000005,
    .icv        = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
                  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};
static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;
static void dev_release(struct device *dev)
{
    return;
}

#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
    .name = DRIVER_NAME,
    .id   = 0,
    .num_resources = 0,
    .dev  = {
        .coherent_dma_mask = DMA_BIT_MASK(32),
        .release = dev_release,
    },
};

static struct device *dev = &pseudo_dev.dev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
    return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
    return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}
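/* All crypt_ctl descriptors live in one coherent DMA array, so virtual
 * and bus addresses convert by plain index arithmetic. The NPE hands a
 * descriptor's bus address back on the receive queue, and one_packet()
 * turns it into the host pointer again via crypt_phys2virt(). */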
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
    return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
    return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
    return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}
static int setup_crypt_desc(void)
{
    BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
    /* the array must cover the emergency descriptors as well, i.e.
     * NPE_QLEN_TOTAL entries (get_crypt_desc_emerg() indexes past
     * NPE_QLEN), and GFP_ATOMIC is required because we are called
     * under desc_lock with interrupts disabled */
    crypt_virt = dma_alloc_coherent(dev,
            NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
            &crypt_phys, GFP_ATOMIC);
    if (!crypt_virt)
        return -ENOMEM;
    memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
    return 0;
}
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
    int i;
    static int idx = 0;
    unsigned long flags;

    spin_lock_irqsave(&desc_lock, flags);

    if (unlikely(!crypt_virt))
        setup_crypt_desc();
    if (unlikely(!crypt_virt)) {
        spin_unlock_irqrestore(&desc_lock, flags);
        return NULL;
    }
    i = idx;
    if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
        if (++idx >= NPE_QLEN)
            idx = 0;
        crypt_virt[i].ctl_flags = CTL_FLAG_USED;
        spin_unlock_irqrestore(&desc_lock, flags);
        return crypt_virt + i;
    } else {
        spin_unlock_irqrestore(&desc_lock, flags);
        return NULL;
    }
}
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
    int i;
    static int idx = NPE_QLEN;
    struct crypt_ctl *desc;
    unsigned long flags;

    desc = get_crypt_desc();
    if (desc)
        return desc;
    if (unlikely(!crypt_virt))
        return NULL;

    spin_lock_irqsave(&emerg_lock, flags);
    i = idx;
    if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
        if (++idx >= NPE_QLEN_TOTAL)
            idx = NPE_QLEN;
        crypt_virt[i].ctl_flags = CTL_FLAG_USED;
        spin_unlock_irqrestore(&emerg_lock, flags);
        return crypt_virt + i;
    } else {
        spin_unlock_irqrestore(&emerg_lock, flags);
        return NULL;
    }
}
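/* The descriptor array is split in two: get_crypt_desc() hands out the
 * first NPE_QLEN entries for the data path, while get_crypt_desc_emerg()
 * falls back to the remaining NPE_QLEN_TOTAL - NPE_QLEN entries. Those
 * are reserved for key setup (ICV / reverse AES key generation), so a
 * busy data path cannot starve a setkey operation. */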
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
        u32 phys)
{
    while (buf) {
        struct buffer_desc *buf1;
        u32 phys1;

        buf1 = buf->next;
        phys1 = buf->phys_next;
        /* unmap the address that chainup_buffers() mapped, which is
         * stored in phys_addr, not the link to the next descriptor */
        dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
        dma_pool_free(buffer_pool, buf, phys);
        buf = buf1;
        phys = phys1;
    }
}
static struct tasklet_struct crypto_done_tasklet;
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
    struct aead_request *req = crypt->data.aead_req;
    struct aead_ctx *req_ctx = aead_request_ctx(req);
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    int authsize = crypto_aead_authsize(tfm);
    int decryptlen = req->cryptlen - authsize;

    if (req_ctx->encrypt) {
        scatterwalk_map_and_copy(req_ctx->hmac_virt,
            req->src, decryptlen, authsize, 1);
    }
    dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
static void one_packet(dma_addr_t phys)
{
    struct crypt_ctl *crypt;
    struct ixp_ctx *ctx;
    int failed;

    /* the NPE flags a failed authentication in the low bits of the
     * returned descriptor address */
    failed = phys & 0x1 ? -EBADMSG : 0;
    phys &= ~0x3;
    crypt = crypt_phys2virt(phys);

    switch (crypt->ctl_flags & CTL_FLAG_MASK) {
    case CTL_FLAG_PERFORM_AEAD: {
        struct aead_request *req = crypt->data.aead_req;
        struct aead_ctx *req_ctx = aead_request_ctx(req);

        free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
        if (req_ctx->hmac_virt) {
            finish_scattered_hmac(crypt);
        }
        req->base.complete(&req->base, failed);
        break;
    }
    case CTL_FLAG_PERFORM_ABLK: {
        struct ablkcipher_request *req = crypt->data.ablk_req;
        struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

        if (req_ctx->dst) {
            free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
        }
        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
        req->base.complete(&req->base, failed);
        break;
    }
    case CTL_FLAG_GEN_ICV:
        ctx = crypto_tfm_ctx(crypt->data.tfm);
        dma_pool_free(ctx_pool, crypt->regist_ptr,
                crypt->regist_buf->phys_addr);
        dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
        if (atomic_dec_and_test(&ctx->configuring))
            complete(&ctx->completion);
        break;
    case CTL_FLAG_GEN_REVAES:
        ctx = crypto_tfm_ctx(crypt->data.tfm);
        *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
        if (atomic_dec_and_test(&ctx->configuring))
            complete(&ctx->completion);
        break;
    default:
        BUG();
    }
    crypt->ctl_flags = CTL_FLAG_UNUSED;
}
static void irqhandler(void *_unused)
{
    tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
    int i;

    /* drain a few entries per run, then yield and re-schedule */
    for (i = 0; i < 4; i++) {
        dma_addr_t phys = qmgr_get_entry(RECV_QID);
        if (!phys)
            return;
        one_packet(phys);
    }
    tasklet_schedule(&crypto_done_tasklet);
}
static int init_ixp_crypto(void)
{
    int ret = -ENODEV;
    u32 msg[2] = { 0, 0 };

    if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
            IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
        printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
        return ret;
    }
    npe_c = npe_request(NPE_ID);
    if (!npe_c)
        return ret;

    if (!npe_running(npe_c)) {
        ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
        if (ret)
            goto npe_release;
        if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
            goto npe_error;
    } else {
        if (npe_send_message(npe_c, msg, "STATUS_MSG"))
            goto npe_error;

        if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
            goto npe_error;
    }

    switch ((msg[1] >> 16) & 0xff) {
    case 3:
        printk(KERN_WARNING "Firmware of %s lacks AES support\n",
                npe_name(npe_c));
        support_aes = 0;
        break;
    case 4:
    case 5:
        break;
    default:
        printk(KERN_ERR "Firmware of %s lacks crypto support\n",
                npe_name(npe_c));
        ret = -ENODEV;
        goto npe_release;
    }
    /* buffer_pool will also be used to sometimes store the hmac,
     * so assure it is large enough */
    BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
    buffer_pool = dma_pool_create("buffer", dev,
            sizeof(struct buffer_desc), 32, 0);
    ret = -ENOMEM;
    if (!buffer_pool)
        goto err;

    ctx_pool = dma_pool_create("context", dev,
            NPE_CTX_LEN, 16, 0);
    if (!ctx_pool)
        goto err;

    ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
            "ixp_crypto:out", NULL);
    if (ret)
        goto err;
    ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
            "ixp_crypto:in", NULL);
    if (ret) {
        qmgr_release_queue(SEND_QID);
        goto err;
    }
    qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
    tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

    qmgr_enable_irq(RECV_QID);
    return 0;

npe_error:
    printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
    ret = -EIO;
err:
    if (ctx_pool)
        dma_pool_destroy(ctx_pool);
    if (buffer_pool)
        dma_pool_destroy(buffer_pool);
npe_release:
    npe_release(npe_c);
    return ret;
}
static void release_ixp_crypto(void)
{
    qmgr_disable_irq(RECV_QID);
    tasklet_kill(&crypto_done_tasklet);

    qmgr_release_queue(SEND_QID);
    qmgr_release_queue(RECV_QID);

    dma_pool_destroy(ctx_pool);
    dma_pool_destroy(buffer_pool);

    npe_release(npe_c);

    if (crypt_virt) {
        dma_free_coherent(dev,
            NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
            crypt_virt, crypt_phys);
    }
    return;
}
static void reset_sa_dir(struct ix_sa_dir *dir)
{
    memset(dir->npe_ctx, 0, NPE_CTX_LEN);
    dir->npe_ctx_idx = 0;
    dir->npe_mode = 0;
}
static int init_sa_dir(struct ix_sa_dir *dir)
{
    dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
    if (!dir->npe_ctx) {
        return -ENOMEM;
    }
    reset_sa_dir(dir);
    return 0;
}
static void free_sa_dir(struct ix_sa_dir *dir)
{
    memset(dir->npe_ctx, 0, NPE_CTX_LEN);
    dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}
static int init_tfm(struct crypto_tfm *tfm)
{
    struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
    int ret;

    atomic_set(&ctx->configuring, 0);
    ret = init_sa_dir(&ctx->encrypt);
    if (ret)
        return ret;
    ret = init_sa_dir(&ctx->decrypt);
    if (ret) {
        free_sa_dir(&ctx->encrypt);
    }
    return ret;
}
static int init_tfm_ablk(struct crypto_tfm *tfm)
{
    tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
    return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
    tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
    return init_tfm(tfm);
}
static void exit_tfm(struct crypto_tfm *tfm)
{
    struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

    free_sa_dir(&ctx->encrypt);
    free_sa_dir(&ctx->decrypt);
}
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
        int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
    struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
    struct crypt_ctl *crypt;
    struct buffer_desc *buf;
    int i;
    u8 *pad;
    u32 pad_phys, buf_phys;

    BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
    pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
    if (!pad)
        return -ENOMEM;
    buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
    if (!buf) {
        dma_pool_free(ctx_pool, pad, pad_phys);
        return -ENOMEM;
    }
    crypt = get_crypt_desc_emerg();
    if (!crypt) {
        dma_pool_free(ctx_pool, pad, pad_phys);
        dma_pool_free(buffer_pool, buf, buf_phys);
        return -EAGAIN;
    }

    memcpy(pad, key, key_len);
    memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
    for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
        pad[i] ^= xpad;
    }

    crypt->data.tfm = tfm;
    crypt->regist_ptr = pad;
    crypt->regist_buf = buf;

    crypt->auth_offs = 0;
    crypt->auth_len = HMAC_PAD_BLOCKLEN;
    crypt->crypto_ctx = ctx_addr;
    crypt->src_buf = buf_phys;
    crypt->icv_rev_aes = target;
    crypt->mode = NPE_OP_HASH_GEN_ICV;
    crypt->init_len = init_len;
    crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

    buf->next = NULL;
    buf->buf_len = HMAC_PAD_BLOCKLEN;
    buf->pkt_len = 0;
    buf->phys_addr = pad_phys;

    atomic_inc(&ctx->configuring);
    qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
    BUG_ON(qmgr_stat_overflow(SEND_QID));
    return 0;
}
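/* HMAC background for setup_auth() below: RFC 2104 defines
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). register_chain_var()
 * queues one NPE_OP_HASH_GEN_ICV operation per pad: it hashes the
 * zero-padded key XORed with ipad/opad and stores the resulting chaining
 * variable at `target` inside the NPE context, so per-request hashing
 * can start from the precomputed inner/outer state. */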
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
        const u8 *key, int key_len, unsigned digest_len)
{
    u32 itarget, otarget, npe_ctx_addr;
    unsigned char *cinfo;
    int init_len, ret = 0;
    u32 cfgword;
    struct ix_sa_dir *dir;
    struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
    const struct ix_hash_algo *algo;

    dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
    cinfo = dir->npe_ctx + dir->npe_ctx_idx;
    algo = ix_hash(tfm);

    /* write cfg word to cryptinfo */
    cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
    *(u32 *)cinfo = cpu_to_be32(cfgword);
    cinfo += sizeof(cfgword);

    /* write ICV to cryptinfo */
    memcpy(cinfo, algo->icv, digest_len);
    cinfo += digest_len;

    itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
                + sizeof(algo->cfgword);
    otarget = itarget + digest_len;
    init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
    npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

    dir->npe_ctx_idx += init_len;
    dir->npe_mode |= NPE_OP_HASH_ENABLE;

    if (!encrypt)
        dir->npe_mode |= NPE_OP_HASH_VERIFY;

    ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
            init_len, npe_ctx_addr, key, key_len);
    if (ret)
        return ret;
    return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
            init_len, npe_ctx_addr, key, key_len);
}
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
    struct crypt_ctl *crypt;
    struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
    struct ix_sa_dir *dir = &ctx->decrypt;

    crypt = get_crypt_desc_emerg();
    if (!crypt) {
        return -EAGAIN;
    }
    *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

    crypt->data.tfm = tfm;
    crypt->crypt_offs = 0;
    crypt->crypt_len = AES_BLOCK128;
    crypt->src_buf = 0;
    crypt->crypto_ctx = dir->npe_ctx_phys;
    crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
    crypt->mode = NPE_OP_ENC_GEN_KEY;
    crypt->init_len = dir->npe_ctx_idx;
    crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

    atomic_inc(&ctx->configuring);
    qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
    BUG_ON(qmgr_stat_overflow(SEND_QID));
    return 0;
}
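/* AES decryption needs the reverse (decryption) key schedule. Rather than
 * computing it on the host, gen_rev_aes_key() above asks the NPE to run
 * one dummy AES_BLOCK128 encryption with NPE_OP_ENC_GEN_KEY and to
 * deposit the reverse key right after the config word of the decrypt
 * context; the temporarily set CIPH_ENCR bit is cleared again in the
 * CTL_FLAG_GEN_REVAES branch of one_packet(). */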
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
        const u8 *key, int key_len)
{
    u8 *cinfo;
    u32 cipher_cfg;
    u32 keylen_cfg = 0;
    struct ix_sa_dir *dir;
    struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
    u32 *flags = &tfm->crt_flags;

    dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
    cinfo = dir->npe_ctx;

    if (encrypt) {
        cipher_cfg = cipher_cfg_enc(tfm);
        dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
    } else {
        cipher_cfg = cipher_cfg_dec(tfm);
    }
    if (cipher_cfg & MOD_AES) {
        switch (key_len) {
        case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
        case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
        case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
        default:
            *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
            return -EINVAL;
        }
        cipher_cfg |= keylen_cfg;
    } else if (cipher_cfg & MOD_3DES) {
        const u32 *K = (const u32 *)key;

        /* reject 3DES keys that degenerate to single DES
         * (K1 == K2 or K2 == K3) */
        if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
                !((K[2] ^ K[4]) | (K[3] ^ K[5])))) {
            *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
            return -EINVAL;
        }
    } else {
        u32 tmp[DES_EXPKEY_WORDS];

        if (des_ekey(tmp, key) == 0) {
            *flags |= CRYPTO_TFM_RES_WEAK_KEY;
        }
    }
    /* write cfg word to cryptinfo */
    *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
    cinfo += sizeof(cipher_cfg);

    /* write cipher key to cryptinfo */
    memcpy(cinfo, key, key_len);
    /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
    if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
        memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
        key_len = DES3_EDE_KEY_SIZE;
    }
    dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
    dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
    if ((cipher_cfg & MOD_AES) && !encrypt) {
        return gen_rev_aes_key(tfm);
    }
    return 0;
}
static struct buffer_desc *chainup_buffers(struct device *dev,
        struct scatterlist *sg, unsigned nbytes,
        struct buffer_desc *buf, gfp_t flags,
        enum dma_data_direction dir)
{
    for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
        unsigned len = min(nbytes, sg->length);
        struct buffer_desc *next_buf;
        u32 next_buf_phys;
        void *ptr;

        nbytes -= len;
        ptr = page_address(sg_page(sg)) + sg->offset;
        next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
        if (!next_buf)
            return NULL;
        sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
        buf->next = next_buf;
        buf->phys_next = next_buf_phys;
        buf = next_buf;

        buf->phys_addr = sg_dma_address(sg);
        buf->buf_len = len;
        buf->dir = dir;
    }
    buf->next = NULL;
    buf->phys_next = 0;
    return buf;
}
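/* chainup_buffers() uses a "hook" pattern: the caller passes a
 * buffer_desc on its stack whose next/phys_next fields seed the chain,
 * while every real link is allocated from buffer_pool and maps one
 * scatterlist entry for DMA. On allocation failure it returns NULL and
 * the caller unwinds with free_buf_chain(). */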
static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
        unsigned int key_len)
{
    struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
    u32 *flags = &tfm->base.crt_flags;
    int ret;

    init_completion(&ctx->completion);
    atomic_inc(&ctx->configuring);

    reset_sa_dir(&ctx->encrypt);
    reset_sa_dir(&ctx->decrypt);

    ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
    ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

    ret = setup_cipher(&tfm->base, 0, key, key_len);
    if (ret)
        goto out;
    ret = setup_cipher(&tfm->base, 1, key, key_len);
    if (ret)
        goto out;

    if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
        if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
            ret = -EINVAL;
        } else {
            *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
        }
    }
out:
    if (!atomic_dec_and_test(&ctx->configuring))
        wait_for_completion(&ctx->completion);
    return ret;
}
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
        unsigned int key_len)
{
    struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

    /* the nonce is stored in bytes at end of key */
    if (key_len < CTR_RFC3686_NONCE_SIZE)
        return -EINVAL;

    memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
            CTR_RFC3686_NONCE_SIZE);

    key_len -= CTR_RFC3686_NONCE_SIZE;
    return ablk_setkey(tfm, key, key_len);
}
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
    struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
    struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
    unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
    struct ix_sa_dir *dir;
    struct crypt_ctl *crypt;
    unsigned int nbytes = req->nbytes;
    enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
    struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
    struct buffer_desc src_hook;
    gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;

    if (qmgr_stat_full(SEND_QID))
        return -EAGAIN;
    if (atomic_read(&ctx->configuring))
        return -EAGAIN;

    dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

    crypt = get_crypt_desc();
    if (!crypt)
        return -ENOMEM;

    crypt->data.ablk_req = req;
    crypt->crypto_ctx = dir->npe_ctx_phys;
    crypt->mode = dir->npe_mode;
    crypt->init_len = dir->npe_ctx_idx;

    crypt->crypt_offs = 0;
    crypt->crypt_len = nbytes;

    BUG_ON(ivsize && !req->info);
    memcpy(crypt->iv, req->info, ivsize);
    if (req->src != req->dst) {
        struct buffer_desc dst_hook;

        crypt->mode |= NPE_OP_NOT_IN_PLACE;
        /* This was never tested by Intel
         * for more than one dst buffer, I think. */
        BUG_ON(req->dst->length < nbytes);
        req_ctx->dst = NULL;
        if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                flags, DMA_FROM_DEVICE))
            goto free_buf_dest;
        src_direction = DMA_TO_DEVICE;
        req_ctx->dst = dst_hook.next;
        crypt->dst_buf = dst_hook.phys_next;
    } else {
        req_ctx->dst = NULL;
    }
    req_ctx->src = NULL;
    if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
            flags, src_direction))
        goto free_buf_src;

    req_ctx->src = src_hook.next;
    crypt->src_buf = src_hook.phys_next;
    crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
    qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
    BUG_ON(qmgr_stat_overflow(SEND_QID));
    return -EINPROGRESS;

free_buf_src:
    free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
    if (req->src != req->dst) {
        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
    }
    crypt->ctl_flags = CTL_FLAG_UNUSED;
    return -ENOMEM;
}
static int ablk_encrypt(struct ablkcipher_request *req)
{
    return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
    return ablk_perform(req, 0);
}
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
    struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
    struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
    u8 iv[CTR_RFC3686_BLOCK_SIZE];
    u8 *info = req->info;
    int ret;

    /* set up counter block */
    memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

    /* initialize counter portion of counter block */
    *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);

    req->info = iv;
    ret = ablk_perform(req, 1);
    req->info = info;
    return ret;
}
*sg
, unsigned start
,
970 if (start
< offset
+ sg
->length
)
973 offset
+= sg
->length
;
974 sg
= scatterwalk_sg_next(sg
);
976 return (start
+ nbytes
> offset
+ sg
->length
);
static int aead_perform(struct aead_request *req, int encrypt,
        int cryptoffset, int eff_cryptlen, u8 *iv)
{
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
    unsigned ivsize = crypto_aead_ivsize(tfm);
    unsigned authsize = crypto_aead_authsize(tfm);
    struct ix_sa_dir *dir;
    struct crypt_ctl *crypt;
    unsigned int cryptlen;
    struct buffer_desc *buf, src_hook;
    struct aead_ctx *req_ctx = aead_request_ctx(req);
    gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;

    if (qmgr_stat_full(SEND_QID))
        return -EAGAIN;
    if (atomic_read(&ctx->configuring))
        return -EAGAIN;

    if (encrypt) {
        dir = &ctx->encrypt;
        cryptlen = req->cryptlen;
    } else {
        dir = &ctx->decrypt;
        /* req->cryptlen includes the authsize when decrypting */
        cryptlen = req->cryptlen - authsize;
        eff_cryptlen -= authsize;
    }
    crypt = get_crypt_desc();
    if (!crypt)
        return -ENOMEM;

    crypt->data.aead_req = req;
    crypt->crypto_ctx = dir->npe_ctx_phys;
    crypt->mode = dir->npe_mode;
    crypt->init_len = dir->npe_ctx_idx;

    crypt->crypt_offs = cryptoffset;
    crypt->crypt_len = eff_cryptlen;

    crypt->auth_offs = 0;
    crypt->auth_len = req->assoclen + ivsize + cryptlen;
    BUG_ON(ivsize && !req->iv);
    memcpy(crypt->iv, req->iv, ivsize);

    if (req->src != req->dst) {
        BUG(); /* -ENOTSUP because of my laziness */
    }

    /* ASSOC data */
    buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
            flags, DMA_TO_DEVICE);
    req_ctx->buffer = src_hook.next;
    crypt->src_buf = src_hook.phys_next;
    if (!buf)
        goto out;
    /* IV */
    sg_init_table(&req_ctx->ivlist, 1);
    sg_set_buf(&req_ctx->ivlist, iv, ivsize);
    buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
            DMA_BIDIRECTIONAL);
    if (!buf)
        goto free_chain;
    if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
        /* The 12 hmac bytes are scattered,
         * we need to copy them into a safe buffer */
        req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
                &crypt->icv_rev_aes);
        if (unlikely(!req_ctx->hmac_virt))
            goto free_chain;
        if (!encrypt) {
            scatterwalk_map_and_copy(req_ctx->hmac_virt,
                req->src, cryptlen, authsize, 0);
        }
        req_ctx->encrypt = encrypt;
    } else {
        req_ctx->hmac_virt = NULL;
    }
    /* Crypt */
    buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
            DMA_BIDIRECTIONAL);
    if (!buf)
        goto free_hmac_virt;
    if (!req_ctx->hmac_virt) {
        crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
    }

    crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
    qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
    BUG_ON(qmgr_stat_overflow(SEND_QID));
    return -EINPROGRESS;

free_hmac_virt:
    if (req_ctx->hmac_virt) {
        dma_pool_free(buffer_pool, req_ctx->hmac_virt,
                crypt->icv_rev_aes);
    }
free_chain:
    free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
    crypt->ctl_flags = CTL_FLAG_UNUSED;
    return -ENOMEM;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
    struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
    u32 *flags = &tfm->base.crt_flags;
    unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
    int ret;

    if (!ctx->enckey_len && !ctx->authkey_len)
        return 0;
    init_completion(&ctx->completion);
    atomic_inc(&ctx->configuring);

    reset_sa_dir(&ctx->encrypt);
    reset_sa_dir(&ctx->decrypt);

    ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
    if (ret)
        goto out;
    ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
    if (ret)
        goto out;
    ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
            ctx->authkey_len, digest_len);
    if (ret)
        goto out;
    ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
            ctx->authkey_len, digest_len);
    if (ret)
        goto out;

    if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
        if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
            ret = -EINVAL;
        } else {
            *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
        }
    }
out:
    if (!atomic_dec_and_test(&ctx->configuring))
        wait_for_completion(&ctx->completion);
    return ret;
}
static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
    int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

    if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
        return -EINVAL;
    return aead_setup(tfm, authsize);
}
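/* The NPE config word stores the ICV length as authsize / 4 (see the
 * "(authsize/4) << 8" note in setup_auth()), hence the check above that
 * authsize is a non-zero multiple of 4 no larger than the digest size. */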
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
        unsigned int keylen)
{
    struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
    struct rtattr *rta = (struct rtattr *)key;
    struct crypto_authenc_key_param *param;

    if (!RTA_OK(rta, keylen))
        goto badkey;
    if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
        goto badkey;
    if (RTA_PAYLOAD(rta) < sizeof(*param))
        goto badkey;

    param = RTA_DATA(rta);
    ctx->enckey_len = be32_to_cpu(param->enckeylen);

    key += RTA_ALIGN(rta->rta_len);
    keylen -= RTA_ALIGN(rta->rta_len);

    if (keylen < ctx->enckey_len)
        goto badkey;

    ctx->authkey_len = keylen - ctx->enckey_len;
    /* both keys must fit the fixed-size buffers in struct ixp_ctx */
    if (ctx->enckey_len > MAX_KEYLEN || ctx->authkey_len > MAX_KEYLEN)
        goto badkey;
    memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
    memcpy(ctx->authkey, key, ctx->authkey_len);

    return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
    ctx->enckey_len = 0;
    crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
    return -EINVAL;
}
static int aead_encrypt(struct aead_request *req)
{
    unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

    return aead_perform(req, 1, req->assoclen + ivsize,
            req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
    unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

    return aead_perform(req, 0, req->assoclen + ivsize,
            req->cryptlen, req->iv);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
    struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
    struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
    unsigned len, ivsize = crypto_aead_ivsize(tfm);
    __be64 seq;

    /* copied from eseqiv.c */
    if (!ctx->salted) {
        get_random_bytes(ctx->salt, ivsize);
        ctx->salted = 1;
    }
    memcpy(req->areq.iv, ctx->salt, ivsize);

    len = ivsize;
    if (ivsize > sizeof(u64)) {
        memset(req->giv, 0, ivsize - sizeof(u64));
        len = sizeof(u64);
    }
    seq = cpu_to_be64(req->seq);
    memcpy(req->giv + ivsize - len, &seq, len);
    return aead_perform(&req->areq, 1, req->areq.assoclen,
            req->areq.cryptlen + ivsize, req->giv);
}
static struct ixp_alg ixp4xx_algos[] = {
{
    .crypto = {
        .cra_name       = "cbc(des)",
        .cra_blocksize  = DES_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = DES_KEY_SIZE,
            .max_keysize    = DES_KEY_SIZE,
            .ivsize         = DES_BLOCK_SIZE,
            .geniv          = "eseqiv",
            }
        }
    },
    .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "ecb(des)",
        .cra_blocksize  = DES_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = DES_KEY_SIZE,
            .max_keysize    = DES_KEY_SIZE,
            }
        }
    },
    .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "cbc(des3_ede)",
        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = DES3_EDE_KEY_SIZE,
            .max_keysize    = DES3_EDE_KEY_SIZE,
            .ivsize         = DES3_EDE_BLOCK_SIZE,
            .geniv          = "eseqiv",
            }
        }
    },
    .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "ecb(des3_ede)",
        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = DES3_EDE_KEY_SIZE,
            .max_keysize    = DES3_EDE_KEY_SIZE,
            }
        }
    },
    .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "cbc(aes)",
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            .ivsize         = AES_BLOCK_SIZE,
            .geniv          = "eseqiv",
            }
        }
    },
    .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
    .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
    .crypto = {
        .cra_name       = "ecb(aes)",
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            }
        }
    },
    .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
    .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
    .crypto = {
        .cra_name       = "ctr(aes)",
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            .ivsize         = AES_BLOCK_SIZE,
            .geniv          = "eseqiv",
            }
        }
    },
    /* CTR mode decrypts with the encryption transform */
    .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
    .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
    .crypto = {
        .cra_name       = "rfc3686(ctr(aes))",
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_u          = { .ablkcipher = {
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            .ivsize         = AES_BLOCK_SIZE,
            .geniv          = "seqiv",
            .setkey         = ablk_rfc3686_setkey,
            .encrypt        = ablk_rfc3686_crypt,
            .decrypt        = ablk_rfc3686_crypt }
        }
    },
    .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
    .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
    .crypto = {
        .cra_name       = "authenc(hmac(md5),cbc(des))",
        .cra_blocksize  = DES_BLOCK_SIZE,
        .cra_u          = { .aead = {
            .ivsize         = DES_BLOCK_SIZE,
            .maxauthsize    = MD5_DIGEST_SIZE,
            }
        }
    },
    .hash = &hash_alg_md5,
    .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
        .cra_u          = { .aead = {
            .ivsize         = DES3_EDE_BLOCK_SIZE,
            .maxauthsize    = MD5_DIGEST_SIZE,
            }
        }
    },
    .hash = &hash_alg_md5,
    .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "authenc(hmac(sha1),cbc(des))",
        .cra_blocksize  = DES_BLOCK_SIZE,
        .cra_u          = { .aead = {
            .ivsize         = DES_BLOCK_SIZE,
            .maxauthsize    = SHA1_DIGEST_SIZE,
            }
        }
    },
    .hash = &hash_alg_sha1,
    .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
        .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
        .cra_u          = { .aead = {
            .ivsize         = DES3_EDE_BLOCK_SIZE,
            .maxauthsize    = SHA1_DIGEST_SIZE,
            }
        }
    },
    .hash = &hash_alg_sha1,
    .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
    .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
    .crypto = {
        .cra_name       = "authenc(hmac(md5),cbc(aes))",
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_u          = { .aead = {
            .ivsize         = AES_BLOCK_SIZE,
            .maxauthsize    = MD5_DIGEST_SIZE,
            }
        }
    },
    .hash = &hash_alg_md5,
    .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
    .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
    .crypto = {
        .cra_name       = "authenc(hmac(sha1),cbc(aes))",
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_u          = { .aead = {
            .ivsize         = AES_BLOCK_SIZE,
            .maxauthsize    = SHA1_DIGEST_SIZE,
            }
        }
    },
    .hash = &hash_alg_sha1,
    .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
    .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
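/* Consumer sketch (illustrative, not part of this driver): once loaded,
 * the algorithms above are reached through the generic crypto API under
 * their cra_name, e.g. "cbc(aes)"; the "-ixp4xx" implementation wins
 * selection via cra_priority = 300. The key, scatterlists, IV and the
 * my_complete/my_ctx callback below are hypothetical placeholders:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ablkcipher_setkey(tfm, key, 16);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);	// -EINPROGRESS if queued
 */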
#define IXP_POSTFIX "-ixp4xx"
static int __init ixp_module_init(void)
{
    int num = ARRAY_SIZE(ixp4xx_algos);
    int i, err;

    if (platform_device_register(&pseudo_dev))
        return -ENODEV;

    spin_lock_init(&desc_lock);
    spin_lock_init(&emerg_lock);

    err = init_ixp_crypto();
    if (err) {
        platform_device_unregister(&pseudo_dev);
        return err;
    }
    for (i = 0; i < num; i++) {
        struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

        if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
                "%s"IXP_POSTFIX, cra->cra_name) >=
                CRYPTO_MAX_ALG_NAME) {
            continue;
        }
        if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
            continue;
        }
        if (!ixp4xx_algos[i].hash) {
            /* block ciphers */
            cra->cra_type = &crypto_ablkcipher_type;
            cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                    CRYPTO_ALG_ASYNC;
            if (!cra->cra_ablkcipher.setkey)
                cra->cra_ablkcipher.setkey = ablk_setkey;
            if (!cra->cra_ablkcipher.encrypt)
                cra->cra_ablkcipher.encrypt = ablk_encrypt;
            if (!cra->cra_ablkcipher.decrypt)
                cra->cra_ablkcipher.decrypt = ablk_decrypt;
            cra->cra_init = init_tfm_ablk;
        } else {
            /* authenc */
            cra->cra_type = &crypto_aead_type;
            cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
                    CRYPTO_ALG_ASYNC;
            cra->cra_aead.setkey = aead_setkey;
            cra->cra_aead.setauthsize = aead_setauthsize;
            cra->cra_aead.encrypt = aead_encrypt;
            cra->cra_aead.decrypt = aead_decrypt;
            cra->cra_aead.givencrypt = aead_givencrypt;
            cra->cra_init = init_tfm_aead;
        }
        cra->cra_ctxsize = sizeof(struct ixp_ctx);
        cra->cra_module = THIS_MODULE;
        cra->cra_alignmask = 3;
        cra->cra_priority = 300;
        cra->cra_exit = exit_tfm;
        if (crypto_register_alg(cra))
            printk(KERN_ERR "Failed to register '%s'\n",
                    cra->cra_name);
        else
            ixp4xx_algos[i].registered = 1;
    }
    return 0;
}
static void __exit ixp_module_exit(void)
{
    int num = ARRAY_SIZE(ixp4xx_algos);
    int i;

    for (i = 0; i < num; i++) {
        if (ixp4xx_algos[i].registered)
            crypto_unregister_alg(&ixp4xx_algos[i].crypto);
    }
    release_ixp_crypto();
    platform_device_unregister(&pseudo_dev);
}
module_init(ixp_module_init);
module_exit(ixp_module_exit);
1481 MODULE_LICENSE("GPL");
1482 MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1483 MODULE_DESCRIPTION("IXP4xx hardware crypto");