/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin, "swcr_spin");

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_combined(struct cryptop *);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);
/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
	    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	u_int8_t *okschedule;
	struct enc_xform *exf;
	int i, k, j, blks, ivlen;
	int error;
	int explicit_kschedule;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			karc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject,
			    ivlen, iv);
	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			crypto_copydata(flags, buf, crd->crd_inject,
			    ivlen, iv);
	}

	ivp = iv;
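
	/*
	 * IV conventions used above: CRD_F_IV_EXPLICIT means the caller
	 * passed the IV in crd_iv; otherwise a random IV is generated for
	 * encryption, or the IV is read from the buffer at crd_inject for
	 * decryption.  CRD_F_IV_PRESENT means the IV is already in the
	 * buffer and must not be written back out.
	 */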
	/*
	 * The semantics are seriously broken because the session key
	 * storage was never designed for concurrent ops.
	 */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		kschedule = kmalloc(exf->ctxsize, M_CRYPTO_DATA,
				    M_NOWAIT);
		if (kschedule == NULL) {
			return ENOMEM;
		}
		error = exf->setkey(kschedule, crd->crd_key,
				    crd->crd_klen / 8);
		if (error) {
			kfree(kschedule, M_CRYPTO_DATA);
			return error;
		}
		explicit_kschedule = 1;
	} else {
		spin_lock(&swcr_spin);
		kschedule = sw->sw_kschedule;
		++sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
		explicit_kschedule = 0;
	}
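
	/*
	 * From here on kschedule is either a private schedule built from
	 * the request's explicit key, or the shared session schedule with
	 * sw_kschedule_refs bumped under swcr_spin so that a concurrent
	 * rekey cannot free it out from under this operation (the matching
	 * release is in the cleanup path below).
	 */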
	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(kschedule, iv);
	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    blk, iv);
					} else {
						exf->decrypt(kschedule,
						    blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				     m = m->m_next)
					;
				k = 0;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    idat, iv);
					} else {
						exf->decrypt(kschedule,
						    idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
		error = 0;	/* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    blk, iv);
					} else {
						exf->decrypt(kschedule,
						    blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    idat, iv);
					} else {
						exf->decrypt(kschedule,
						    idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}
		error = 0;	/* Done with iovec encryption/decryption */
	} else {	/* Contiguous buffer */
		if (exf->reinit) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(kschedule, buf + i, iv);
				} else {
					exf->decrypt(kschedule, buf + i, iv);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(kschedule, buf + i, iv);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			     i >= crd->crd_skip; i -= blks) {
				exf->decrypt(kschedule, buf + i, iv);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}
		error = 0;	/* Done w/contiguous buffer encrypt/decrypt */
	}
	/*
	 * Cleanup - explicitly replace the session key if requested
	 *	     (horrible semantics for concurrent operation)
	 */
	if (explicit_kschedule) {
		okschedule = NULL;
		spin_lock(&swcr_spin);
		if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
			okschedule = sw->sw_kschedule;
			sw->sw_kschedule = kschedule;
		}
		spin_unlock(&swcr_spin);
		if (okschedule) {
			bzero(okschedule, exf->ctxsize);
			kfree(okschedule, M_CRYPTO_DATA);
		}
	} else {
		spin_lock(&swcr_spin);
		--sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
	}

	return error;
}
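
/*
 * For reference, the CBC chaining interleaved with the buffer walking
 * above reduces to the minimal sketch below.  Illustrative only and
 * kept under #if 0; the block cipher itself is assumed to come from
 * the enc_xform table.
 */
#if 0
static void
cbc_encrypt_sketch(void (*cipher_block)(u_char *),	/* one block, in place */
		   u_char *data, int len, int blks, const u_char *iv)
{
	u_char prev[EALG_MAX_BLOCK_LEN];
	int i, j;

	bcopy(iv, prev, blks);
	for (i = 0; i < len; i += blks) {
		/* XOR plaintext with previous ciphertext block (or the IV) */
		for (j = 0; j < blks; j++)
			data[i + j] ^= prev[j];
		cipher_block(&data[i]);
		/* this ciphertext block chains into the next one */
		bcopy(&data[i], prev, blks);
	}
}
#endif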
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
		 int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer,
		    axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer,
		    axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}
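
/*
 * The pad handling in swcr_authprepare() implements the standard HMAC
 * construction (RFC 2104):
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * sw_ictx and sw_octx hold the digest states after absorbing K ^ ipad
 * and K ^ opad respectively, so per-request work can resume from the
 * precomputed states.  Note that the key is XOR'd back to its original
 * value before the function returns.
 */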
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
		 int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
/*
 * Apply a combined encryption-authentication transformation.
 */
static int
swcr_combined(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[HASH_MAX_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	uint8_t *kschedule;
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz, ivlen, len;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);
	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			karc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);
	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			crypto_copydata(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		crypto_copydata(crde->crd_flags, buf, crda->crd_skip + i, len,
		    blk);
		axf->Update(&ctx, blk, len);
	}

	spin_lock(&swcr_spin);
	kschedule = sw->sw_kschedule;
	++sw->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	if (exf->reinit)
		exf->reinit(kschedule, iv);
	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, blk, iv);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(kschedule, blk, iv);
		}
		crypto_copyback(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
	}
	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}
	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	crypto_copyback(crda->crd_flags, crp->crp_buf, crda->crd_inject,
	    axf->blocksize, aalg);

	spin_lock(&swcr_spin);
	--sw->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	return (0);
}
/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
	     caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	/*
	 * Copy back the (de)compressed data.
	 * If CRYPTO_F_IMBUF, the mbuf will be extended as necessary.
	 */
	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;
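			/*
			 * Trim the now-unused tail of the uio: shorten the
			 * final iovecs (dropping ones that become empty)
			 * until the surplus is consumed.  The mbuf case
			 * above did the same with a negative m_adj().
			 */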
			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data *swd_base;
	struct swcr_data **swd;
	struct swcr_data **oswd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i, n;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	swd_base = NULL;
	swd = &swd_base;

	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);
		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &enc_xform_aes_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_TWOFISH_CBC:
			txf = &enc_xform_twofish;
			goto enccommon;
		case CRYPTO_SERPENT_CBC:
			txf = &enc_xform_serpent;
			goto enccommon;
		case CRYPTO_TWOFISH_XTS:
			txf = &enc_xform_twofish_xts;
			goto enccommon;
		case CRYPTO_SERPENT_XTS:
			txf = &enc_xform_serpent_xts;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			KKASSERT(txf->ctxsize > 0);
			(*swd)->sw_kschedule = kmalloc(txf->ctxsize,
						       M_CRYPTO_DATA,
						       M_WAITOK | M_ZERO);
			if (cri->cri_key != NULL) {
				error = txf->setkey((*swd)->sw_kschedule,
						    cri->cri_key,
						    cri->cri_klen / 8);
				if (error) {
					swcr_freesession_slot(&swd_base, 0);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;
		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
						  M_CRYPTO_DATA, M_WAITOK);

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;
		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			error = axf->Setkey((*swd)->sw_ictx, cri->cri_key,
					    cri->cri_klen / 8);
			if (error) {
				swcr_freesession_slot(&swd_base, 0);
				return error;
			}
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_slot(&swd_base, 0);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	/*
	 * Atomically allocate a session
	 */
	for (;;) {
		spin_lock(&swcr_spin);
		for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
			if (swcr_sessions[i] == NULL)
				break;
		}
		if (i < swcr_sesnum) {
			swcr_sessions[i] = swd_base;
			swcr_minsesnum = i + 1;
			spin_unlock(&swcr_spin);
			break;
		}
		n = swcr_sesnum * 2;
		if (n < CRYPTO_SW_SESSIONS)
			n = CRYPTO_SW_SESSIONS;
		spin_unlock(&swcr_spin);

		/*
		 * A larger allocation is required, reallocate the array
		 * and replace, checking for SMP races.
		 */
		swd = kmalloc(n * sizeof(struct swcr_data *),
			      M_CRYPTO_DATA, M_WAITOK | M_ZERO);
		spin_lock(&swcr_spin);
		if (swcr_sesnum >= n) {
			spin_unlock(&swcr_spin);
			kfree(swd, M_CRYPTO_DATA);
		} else if (swcr_sesnum) {
			bcopy(swcr_sessions, swd,
			      swcr_sesnum * sizeof(struct swcr_data *));
			oswd = swcr_sessions;
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
			kfree(oswd, M_CRYPTO_DATA);
		} else {
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
		}
	}

	*sid = i;
	return 0;
}
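
/*
 * Note on the allocation scheme above: swcr_minsesnum is only a
 * low-water hint maintained by swcr_freesession_slot(), so a stale
 * value merely lengthens the slot scan.  The retry loop tolerates
 * another CPU growing swcr_sessions concurrently; the worst case is
 * one extra array allocation that is immediately freed.
 */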
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL) {
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	return(swcr_freesession_slot(&swcr_sessions[sid], sid));
}
static int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct swcr_data *swd;
	struct swcr_data *swnext;

	/*
	 * Protect session detachment with the spinlock.
	 */
	spin_lock(&swcr_spin);
	swnext = *swdp;
	*swdp = NULL;
	if (sid && swcr_minsesnum > sid)
		swcr_minsesnum = sid;
	spin_unlock(&swcr_spin);
	/*
	 * Clean up at our leisure.
	 */
	while ((swd = swnext) != NULL) {
		swnext = swd->sw_next;
		swd->sw_next = NULL;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule) {
				bzero(swd->sw_kschedule, txf->ctxsize);
				kfree(swd->sw_kschedule, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			/* Nothing to do */
			break;
		}

		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}
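
/*
 * Note: every context released above is bzero()'d before kfree() so
 * that session key material and intermediate hash state do not linger
 * in freed kernel memory.
 */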
/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			crp->crp_etype = swcr_combined(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
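
/*
 * Illustrative sketch of how a kernel consumer reaches swcr_process()
 * through the opencrypto framework.  Kept under #if 0: the entry points
 * used (crypto_newsession(), crypto_getreq(), crypto_dispatch()) are
 * the framework calls this driver registers against, but the glue code
 * itself is an assumption for illustration, not part of this file.
 */
#if 0
static int
swcr_usage_done(struct cryptop *crp)
{
	/* inspect crp->crp_etype, then crypto_freereq(crp) as needed */
	return 0;
}

static void
swcr_usage_sketch(u_char *key, u_char *payload, int len)
{
	struct cryptoini cri;
	struct cryptop *crp;
	struct cryptodesc *crd;
	u_int64_t sid;

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_CTR;
	cri.cri_klen = 128;			/* key length in bits */
	cri.cri_key = (caddr_t)key;
	if (crypto_newsession(&sid, &cri, 0))	/* 0: software allowed */
		return;

	crp = crypto_getreq(1);			/* one descriptor */
	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crp->crp_buf = (caddr_t)payload;
	crp->crp_callback = swcr_usage_done;
	crd = crp->crp_desc;
	crd->crd_skip = 0;
	crd->crd_len = len;
	crd->crd_inject = 0;
	crd->crd_alg = CRYPTO_AES_CTR;
	crd->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_PRESENT;
	crypto_dispatch(crp);			/* ends up in swcr_process() */
}
#endif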
static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	/* XXX: not certain this BUS_ADD_CHILD usage is correct */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}
static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}
static int
swcr_attach(device_t dev)
{
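	/*
	 * Pre-fill the shared HMAC pad buffers; swcr_authprepare() uses
	 * them to extend a short key's ipad/opad stream out to the hash
	 * block size without modifying the key buffer itself.
	 */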
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
	swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
					   CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!");
		return ENXIO;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_AES_GCM_16);
	REGISTER(CRYPTO_AES_GMAC);
	REGISTER(CRYPTO_AES_128_GMAC);
	REGISTER(CRYPTO_AES_192_GMAC);
	REGISTER(CRYPTO_AES_256_GMAC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_TWOFISH_CBC);
	REGISTER(CRYPTO_SERPENT_CBC);
	REGISTER(CRYPTO_TWOFISH_XTS);
	REGISTER(CRYPTO_SERPENT_XTS);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}
static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		kfree(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}
static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	DEVMETHOD_END
};
static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;
/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass,
	      crypto_modevent, NULL);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
, 1, 1, 1);