crypto: ixp4xx - Fix handling of chained sg buffers
linux-2.6/mini2440.git: drivers/crypto/ixp4xx_crypto.c
blob af9761ccf9f132a6414aacec926251b06c8b64c6
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY	0x01
#define NPE_OP_CCM_ENABLE	0x04
#define NPE_OP_CRYPT_ENABLE	0x08
#define NPE_OP_HASH_ENABLE	0x10
#define NPE_OP_NOT_IN_PLACE	0x20
#define NPE_OP_HMAC_DISABLE	0x40
#define NPE_OP_CRYPT_ENCRYPT	0x80

#define NPE_OP_CCM_GEN_MIC	0xcc
#define NPE_OP_HASH_GEN_ICV	0x50
#define NPE_OP_ENC_GEN_KEY	0xc9

#define MOD_ECB		0x0000
#define MOD_CTR		0x1000
#define MOD_CBC_ENC	0x2000
#define MOD_CBC_DEC	0x3000
#define MOD_CCM_ENC	0x4000
#define MOD_CCM_DEC	0x5000

#define KEYLEN_128	4
#define KEYLEN_192	6
#define KEYLEN_256	8

#define CIPH_DECR	0x0000
#define CIPH_ENCR	0x0400

#define MOD_DES		0x0000
#define MOD_TDEA2	0x0100
#define MOD_3DES	0x0200
#define MOD_AES		0x0800
#define MOD_AES128	(0x0800 | KEYLEN_128)
#define MOD_AES192	(0x0900 | KEYLEN_192)
#define MOD_AES256	(0x0a00 | KEYLEN_256)

#define MAX_IVLEN	16
#define NPE_ID		2  /* NPE C */
#define NPE_QLEN	16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL	64

#define SEND_QID	29
#define RECV_QID	30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE		0x36
#define HMAC_OPAD_VALUE		0x5C
#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16
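/* Scatter buffer descriptor handed to the NPE.  The leading fields up to
 * __reserved are presumably the layout the NPE firmware consumes; "next"
 * and "dir" are host-only bookkeeping so the chain built from a
 * scatterlist can be walked and unmapped again (see free_buf_chain()). */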
struct buffer_desc {
	u32 phys_next;
	u16 buf_len;
	u16 pkt_len;
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *buffer;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static void dev_release(struct device *dev)
{
	return;
}

#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.num_resources	= 0,
	.dev		= {
		.coherent_dma_mask = DMA_32BIT_MASK,
		.release = dev_release,
	}
};

static struct device *dev = &pseudo_dev.dev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_KERNEL);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
	return 0;
}
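/* Descriptors are handed out round-robin from the coherent array:
 * get_crypt_desc() serves the first NPE_QLEN slots under desc_lock, and
 * get_crypt_desc_emerg() falls back to the slots above them, which are
 * reserved for key/ICV setup requests that must not starve. */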
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}
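/* Walk a buffer_desc chain built by chainup_buffers(): unmap each element
 * with the direction recorded in the descriptor and return it to the DMA
 * pool.  "phys" is the pool handle of the first element. */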
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
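/* Completion path, called from the tasklet for every descriptor the NPE
 * returns on RECV_QID.  Bit 0 of the queue entry flags an authentication
 * failure; the low bits are masked off before the physical descriptor
 * address is converted back to a virtual pointer. */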
static void one_packet(dma_addr_t phys)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(void)
{
	int ret = -ENODEV;

	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		npe_load_firmware(npe_c, npe_name(npe_c), dev);
	}

	/* buffer_pool will also be used to sometimes store the hmac,
	 * so ensure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(void)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
	return;
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}
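/* Precompute one half of the HMAC key schedule: XOR the key with ipad or
 * opad, hand the padded block to the NPE as a one-off hash request
 * (CTL_FLAG_GEN_ICV) and let it write the resulting chaining variable
 * into the per-direction crypto context at "target". */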
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
	*(u32*)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}
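/* AES decryption on the NPE wants the reverse key schedule.  Submit a
 * dummy one-block encryption with NPE_OP_ENC_GEN_KEY so the firmware
 * derives it and stores it behind the cfg word of the decrypt
 * direction's crypto context (icv_rev_aes points there). */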
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
			case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
			case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
			case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
			default:
				*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
				return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
		{
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}
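/* Build a buffer_desc chain for "nbytes" of a (possibly chained)
 * scatterlist, mapping each sg element for DMA.  "buf" is a hook on the
 * caller's stack: the first allocated descriptor is linked behind it, so
 * the caller picks up the chain head from buf->next / buf->phys_next.
 * Returns the last descriptor written, or NULL if a pool allocation
 * failed part-way (the partial chain stays linked through the hook so it
 * can still be freed with free_buf_chain()). */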
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}
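/* Common submit path for the async block cipher ops: grab a descriptor,
 * copy the IV, chain up src (and dst when the request is not in place)
 * into buffer_desc lists and push the descriptor to the NPE's SEND queue.
 * Completion is reported back from one_packet(). */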
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(req->dst->length < nbytes);
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}
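/* Return non-zero when the "nbytes" of authentication data starting at
 * "start" do not sit inside a single scatterlist entry; in that case the
 * ICV has to be bounced through a contiguous buffer (hmac_virt). */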
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	int offset = 0;

	if (!nbytes)
		return 0;

	for (;;) {
		if (start < offset + sg->length)
			break;

		offset += sg->length;
		sg = scatterwalk_sg_next(sg);
	}
	return (start + nbytes > offset + sg->length);
}
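/* AEAD path for the authenc() algorithms.  The associated data, the IV
 * and the ciphertext/ICV are chained into one buffer_desc list so the
 * NPE authenticates assoc||iv||crypt while only the crypt region is
 * en/decrypted.  Only in-place operation (src == dst) is supported. */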
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	/* ASSOC data */
	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
		flags, DMA_TO_DEVICE);
	req_ctx->buffer = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto out;
	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_chain;
	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_chain;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}
	/* Crypt */
	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_hmac_virt;
	if (!req_ctx->hmac_virt) {
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;
free_hmac_virt:
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
free_chain:
	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckey_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckey_len)
		goto badkey;

	ctx->authkey_len = keylen - ctx->enckey_len;
	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
	memcpy(ctx->authkey, key, ctx->authkey_len);

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	ctx->enckey_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);
	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen + ivsize, req->giv);
}
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
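/* Register every algorithm the hardware supports.  Plain ablkcipher
 * entries fall back to the generic ablk_* handlers unless the table
 * supplies its own (rfc3686); entries with a .hash pointer become
 * asynchronous "authenc" AEAD algorithms. */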
#define IXP_POSTFIX "-ixp4xx"
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	if (platform_device_register(&pseudo_dev))
		return -ENODEV;

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto();
	if (err) {
		platform_device_unregister(&pseudo_dev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_ASYNC;
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto();
	platform_device_unregister(&pseudo_dev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");