/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);
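
/*
 * Constant pad bytes for HMAC inner/outer digests (RFC 2104).  These
 * buffers are filled with HMAC_IPAD_VAL/HMAC_OPAD_VAL at attach time
 * and are used to pad keys shorter than the hash block size.
 */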
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_combined(struct cryptop *);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);
/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
            int flags)
{
        unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
        unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
        u_int8_t *kschedule;
        u_int8_t *okschedule;
        struct enc_xform *exf;
        int i, k, j, blks, ivlen;
        int error;
        int explicit_kschedule;

        exf = sw->sw_exf;
        blks = exf->blocksize;
        ivlen = exf->ivsize;

        /* Check for non-padded data */
        if (crd->crd_len % blks)
                return EINVAL;

        /* Initialize the IV */
        if (crd->crd_flags & CRD_F_ENCRYPT) {
                /* IV explicitly provided ? */
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crd->crd_iv, iv, ivlen);
                else
                        karc4rand(iv, ivlen);

                /* Do we need to write the IV */
                if (!(crd->crd_flags & CRD_F_IV_PRESENT))
                        crypto_copyback(flags, buf, crd->crd_inject,
                                        ivlen, iv);
        } else {        /* Decryption */
                /* IV explicitly provided ? */
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crd->crd_iv, iv, ivlen);
                else
                        crypto_copydata(flags, buf, crd->crd_inject,
                                        ivlen, iv);
        }
        ivp = iv;
        /*
         * The semantics are seriously broken because the session key
         * storage was never designed for concurrent ops.
         */
        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
                kschedule = NULL;
                explicit_kschedule = 1;
                error = exf->setkey(&kschedule,
                                    crd->crd_key, crd->crd_klen / 8);
                if (error)
                        return error;
        } else {
                spin_lock(&swcr_spin);
                kschedule = sw->sw_kschedule;
                ++sw->sw_kschedule_refs;
                spin_unlock(&swcr_spin);
                explicit_kschedule = 0;
        }
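
        /*
         * Note: sw_kschedule_refs keeps the shared key schedule alive
         * while it is used outside the spinlock; an explicit key gets
         * a private schedule that we try to install during cleanup.
         */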
        /*
         * xforms that provide a reinit method perform all IV
         * handling themselves.
         */
        if (exf->reinit)
                exf->reinit(kschedule, iv);
        if (flags & CRYPTO_F_IMBUF) {
                struct mbuf *m = (struct mbuf *) buf;

                /* Find beginning of data */
                m = m_getptr(m, crd->crd_skip, &k);
                if (m == NULL) {
                        error = EINVAL;
                        goto done;
                }

                i = crd->crd_len;

                while (i > 0) {
                        /*
                         * If there's insufficient data at the end of
                         * an mbuf, we have to do some copying.
                         */
                        if (m->m_len < k + blks && m->m_len != k) {
                                m_copydata(m, k, blks, blk);

                                /* Actual encryption/decryption */
                                if (exf->reinit) {
                                        if (crd->crd_flags & CRD_F_ENCRYPT) {
                                                exf->encrypt(kschedule,
                                                             blk, iv);
                                        } else {
                                                exf->decrypt(kschedule,
                                                             blk, iv);
                                        }
                                } else if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        exf->encrypt(kschedule, blk, iv);

                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        bcopy(blk, iv, blks);
                                        ivp = iv;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        nivp = (ivp == iv) ? iv2 : iv;
                                        bcopy(blk, nivp, blks);

                                        exf->decrypt(kschedule, blk, iv);

                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        ivp = nivp;
                                }

                                /* Copy back decrypted block */
                                m_copyback(m, k, blks, blk);

                                /* Advance pointer */
                                m = m_getptr(m, k + blks, &k);
                                if (m == NULL) {
                                        error = EINVAL;
                                        goto done;
                                }

                                i -= blks;

                                /* Could be done... */
                                if (i == 0)
                                        break;
                        }

                        /* Skip possibly empty mbufs */
                        if (k == m->m_len) {
                                for (m = m->m_next; m && m->m_len == 0;
                                     m = m->m_next)
                                        ;
                                k = 0;
                        }
                        if (m == NULL) {
                                error = EINVAL;
                                goto done;
                        }

                        /*
                         * Warning: idat may point to garbage here, but
                         * we only use it in the while() loop, only if
                         * there are indeed enough data.
                         */
                        idat = mtod(m, unsigned char *) + k;

                        while (m->m_len >= k + blks && i > 0) {
                                if (exf->reinit) {
                                        if (crd->crd_flags & CRD_F_ENCRYPT) {
                                                exf->encrypt(kschedule,
                                                             idat, iv);
                                        } else {
                                                exf->decrypt(kschedule,
                                                             idat, iv);
                                        }
                                } else if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        exf->encrypt(kschedule, idat, iv);
                                        ivp = idat;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block to be used
                                         * in next block's processing.
                                         */
                                        nivp = (ivp == iv) ? iv2 : iv;
                                        bcopy(idat, nivp, blks);

                                        exf->decrypt(kschedule, idat, iv);

                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        ivp = nivp;
                                }

                                idat += blks;
                                k += blks;
                                i -= blks;
                        }
                }
                error = 0;      /* Done with mbuf encryption/decryption */
        } else if (flags & CRYPTO_F_IOV) {
                struct uio *uio = (struct uio *) buf;
                struct iovec *iov;

                /* Find beginning of data */
                iov = cuio_getptr(uio, crd->crd_skip, &k);
                if (iov == NULL) {
                        error = EINVAL;
                        goto done;
                }

                i = crd->crd_len;

                while (i > 0) {
                        /*
                         * If there's insufficient data at the end of
                         * an iovec, we have to do some copying.
                         */
                        if (iov->iov_len < k + blks && iov->iov_len != k) {
                                cuio_copydata(uio, k, blks, blk);

                                /* Actual encryption/decryption */
                                if (exf->reinit) {
                                        if (crd->crd_flags & CRD_F_ENCRYPT) {
                                                exf->encrypt(kschedule,
                                                             blk, iv);
                                        } else {
                                                exf->decrypt(kschedule,
                                                             blk, iv);
                                        }
                                } else if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        exf->encrypt(kschedule, blk, iv);

                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        bcopy(blk, iv, blks);
                                        ivp = iv;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        nivp = (ivp == iv) ? iv2 : iv;
                                        bcopy(blk, nivp, blks);

                                        exf->decrypt(kschedule, blk, iv);

                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        ivp = nivp;
                                }

                                /* Copy back decrypted block */
                                cuio_copyback(uio, k, blks, blk);

                                /* Advance pointer */
                                iov = cuio_getptr(uio, k + blks, &k);
                                if (iov == NULL) {
                                        error = EINVAL;
                                        goto done;
                                }

                                i -= blks;

                                /* Could be done... */
                                if (i == 0)
                                        break;
                        }

                        /*
                         * Warning: idat may point to garbage here, but
                         * we only use it in the while() loop, only if
                         * there are indeed enough data.
                         */
                        idat = (char *)iov->iov_base + k;

                        while (iov->iov_len >= k + blks && i > 0) {
                                if (exf->reinit) {
                                        if (crd->crd_flags & CRD_F_ENCRYPT) {
                                                exf->encrypt(kschedule,
                                                             idat, iv);
                                        } else {
                                                exf->decrypt(kschedule,
                                                             idat, iv);
                                        }
                                } else if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        exf->encrypt(kschedule, idat, iv);
                                        ivp = idat;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block to be used
                                         * in next block's processing.
                                         */
                                        nivp = (ivp == iv) ? iv2 : iv;
                                        bcopy(idat, nivp, blks);

                                        exf->decrypt(kschedule, idat, iv);

                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        ivp = nivp;
                                }

                                idat += blks;
                                k += blks;
                                i -= blks;
                        }
                        if (k == iov->iov_len) {
                                iov++;
                                k = 0;
                        }
                }
                error = 0;      /* Done with iovec encryption/decryption */
        } else {        /* Contiguous buffer */
                if (exf->reinit) {
                        for (i = crd->crd_skip;
                             i < crd->crd_skip + crd->crd_len; i += blks) {
                                if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        exf->encrypt(kschedule, buf + i, iv);
                                } else {
                                        exf->decrypt(kschedule, buf + i, iv);
                                }
                        }
                } else if (crd->crd_flags & CRD_F_ENCRYPT) {
                        for (i = crd->crd_skip;
                             i < crd->crd_skip + crd->crd_len; i += blks) {
                                /* XOR with the IV/previous block, as appropriate. */
                                if (i == crd->crd_skip)
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= ivp[k];
                                else
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= buf[i + k - blks];
                                exf->encrypt(kschedule, buf + i, iv);
                        }
                } else {        /* Decrypt */
                        /*
                         * Start at the end, so we don't need to keep the
                         * encrypted block as the IV for the next block.
                         */
                        for (i = crd->crd_skip + crd->crd_len - blks;
                             i >= crd->crd_skip; i -= blks) {
                                exf->decrypt(kschedule, buf + i, iv);

                                /* XOR with the IV/previous block, as appropriate */
                                if (i == crd->crd_skip)
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= ivp[k];
                                else
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= buf[i + k - blks];
                        }
                }
                error = 0;      /* Done w/contiguous buffer encrypt/decrypt */
        }
done:
        /*
         * Cleanup - explicitly replace the session key if requested
         *           (horrible semantics for concurrent operation)
         */
        if (explicit_kschedule) {
                spin_lock(&swcr_spin);
                if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
                        okschedule = sw->sw_kschedule;
                        sw->sw_kschedule = kschedule;
                } else {
                        okschedule = kschedule;
                }
                spin_unlock(&swcr_spin);
                if (okschedule)
                        exf->zerokey(&okschedule);
        } else {
                spin_lock(&swcr_spin);
                --sw->sw_kschedule_refs;
                spin_unlock(&swcr_spin);
        }
        return error;
}
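
/*
 * Prepare the authentication context for a keyed hash: for HMAC the
 * key is XOR'ed with the ipad/opad values and pre-hashed into sw_ictx
 * and sw_octx, so per-request work starts from copies of these
 * contexts; for KPDK the raw key is stashed in sw_octx instead.
 */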
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
                 int klen)
{
        int k;

        klen /= 8;

        switch (axf->type) {
        case CRYPTO_MD5_HMAC:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_NULL_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
                for (k = 0; k < klen; k++)
                        key[k] ^= HMAC_IPAD_VAL;

                axf->Init(sw->sw_ictx);
                axf->Update(sw->sw_ictx, key, klen);
                axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

                for (k = 0; k < klen; k++)
                        key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

                axf->Init(sw->sw_octx);
                axf->Update(sw->sw_octx, key, klen);
                axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

                for (k = 0; k < klen; k++)
                        key[k] ^= HMAC_OPAD_VAL;
                break;
        case CRYPTO_MD5_KPDK:
        case CRYPTO_SHA1_KPDK:
            {
                /* We need a buffer that can hold an md5 and a sha1 result. */
                u_char buf[SHA1_RESULTLEN];

                sw->sw_klen = klen;
                bcopy(key, sw->sw_octx, klen);
                axf->Init(sw->sw_ictx);
                axf->Update(sw->sw_ictx, key, klen);
                axf->Final(buf, sw->sw_ictx);
                break;
            }
        default:
                kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
                        "doesn't use keys.\n", __func__, axf->type);
        }
}
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
                 int flags)
{
        unsigned char aalg[HASH_MAX_LEN];
        struct auth_hash *axf;
        union authctx ctx;
        int err;

        if (sw->sw_ictx == NULL)
                return EINVAL;

        axf = sw->sw_axf;

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
                swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

        bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

        err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
                           (int (*)(void *, void *, unsigned int))axf->Update,
                           (caddr_t)&ctx);
        if (err)
                return err;
        switch (sw->sw_alg) {
        case CRYPTO_MD5_HMAC:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
                if (sw->sw_octx == NULL)
                        return EINVAL;

                axf->Final(aalg, &ctx);
                bcopy(sw->sw_octx, &ctx, axf->ctxsize);
                axf->Update(&ctx, aalg, axf->hashsize);
                axf->Final(aalg, &ctx);
                break;

        case CRYPTO_MD5_KPDK:
        case CRYPTO_SHA1_KPDK:
                if (sw->sw_octx == NULL)
                        return EINVAL;

                axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
                axf->Final(aalg, &ctx);
                break;

        case CRYPTO_NULL_HMAC:
                axf->Final(aalg, &ctx);
                break;
        }
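
        /*
         * sw_mlen is the caller-requested MAC length; 0 means the
         * algorithm's full hash size.
         */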
        /* Inject the authentication data */
        crypto_copyback(flags, buf, crd->crd_inject,
                        sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
        return 0;
}
/*
 * Apply a combined encryption-authentication transformation
 */
static int
swcr_combined(struct cryptop *crp)
{
        uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
        u_char *blk = (u_char *)blkbuf;
        u_char aalg[HASH_MAX_LEN];
        u_char iv[EALG_MAX_BLOCK_LEN];
        u_int8_t *kschedule;
        union authctx ctx;
        struct cryptodesc *crd, *crda = NULL, *crde = NULL;
        struct swcr_data *sw, *swa, *swe;
        struct auth_hash *axf = NULL;
        struct enc_xform *exf = NULL;
        caddr_t buf = (caddr_t)crp->crp_buf;
        uint32_t *blkp;
        int i, blksz, ivlen, len;

        blksz = 0;
        ivlen = 0;
        swa = swe = NULL;

        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
                     sw && sw->sw_alg != crd->crd_alg;
                     sw = sw->sw_next)
                        ;
                if (sw == NULL)
                        return (EINVAL);

                switch (sw->sw_alg) {
                case CRYPTO_AES_GCM_16:
                case CRYPTO_AES_GMAC:
                        swe = sw;
                        crde = crd;
                        exf = swe->sw_exf;
                        ivlen = exf->ivsize;
                        break;
                case CRYPTO_AES_128_GMAC:
                case CRYPTO_AES_192_GMAC:
                case CRYPTO_AES_256_GMAC:
                        swa = sw;
                        crda = crd;
                        axf = swa->sw_axf;
                        if (swa->sw_ictx == NULL)
                                return (EINVAL);
                        bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
                        blksz = axf->blocksize;
                        break;
                default:
                        return (EINVAL);
                }
        }
        if (crde == NULL || crda == NULL)
                return (EINVAL);

        /* Initialize the IV */
        if (crde->crd_flags & CRD_F_ENCRYPT) {
                /* IV explicitly provided ? */
                if (crde->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crde->crd_iv, iv, ivlen);
                else
                        karc4rand(iv, ivlen);

                /* Do we need to write the IV */
                if (!(crde->crd_flags & CRD_F_IV_PRESENT))
                        crypto_copyback(crde->crd_flags, buf, crde->crd_inject,
                                        ivlen, iv);
        } else {        /* Decryption */
                /* IV explicitly provided ? */
                if (crde->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crde->crd_iv, iv, ivlen);
                else
                        crypto_copydata(crde->crd_flags, buf, crde->crd_inject,
                                        ivlen, iv);
        }

        /* Supply MAC with IV */
        if (axf->Reinit)
                axf->Reinit(&ctx, iv, ivlen);

        /* Supply MAC with AAD */
        for (i = 0; i < crda->crd_len; i += blksz) {
                len = MIN(crda->crd_len - i, blksz);
                crypto_copydata(crde->crd_flags, buf, crda->crd_skip + i, len,
                                blk);
                axf->Update(&ctx, blk, len);
        }

        spin_lock(&swcr_spin);
        kschedule = swe->sw_kschedule;
        ++swe->sw_kschedule_refs;
        spin_unlock(&swcr_spin);

        if (exf->reinit)
                exf->reinit(kschedule, iv);

        /* Do encryption/decryption with MAC */
        for (i = 0; i < crde->crd_len; i += blksz) {
                len = MIN(crde->crd_len - i, blksz);
                if (len < blksz)
                        bzero(blk, blksz);
                crypto_copydata(crde->crd_flags, buf, crde->crd_skip + i, len,
                                blk);
                if (crde->crd_flags & CRD_F_ENCRYPT) {
                        exf->encrypt(kschedule, blk, iv);
                        axf->Update(&ctx, blk, len);
                } else {
                        axf->Update(&ctx, blk, len);
                        exf->decrypt(kschedule, blk, iv);
                }
                crypto_copyback(crde->crd_flags, buf, crde->crd_skip + i, len,
                                blk);
        }
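
        /*
         * GCM/GMAC finalization appends a block holding the bit lengths
         * of the AAD and of the en-/decrypted payload, each as a 64-bit
         * big-endian quantity (only the low 32 bits of each are non-zero
         * here, at word offsets 1 and 3 respectively).
         */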
        /* Do any required special finalization */
        switch (crda->crd_alg) {
        case CRYPTO_AES_128_GMAC:
        case CRYPTO_AES_192_GMAC:
        case CRYPTO_AES_256_GMAC:
                /* length block */
                bzero(blk, blksz);
                blkp = (uint32_t *)blk + 1;
                *blkp = htobe32(crda->crd_len * 8);
                blkp = (uint32_t *)blk + 3;
                *blkp = htobe32(crde->crd_len * 8);
                axf->Update(&ctx, blk, blksz);
                break;
        }

        axf->Final(aalg, &ctx);

        /* Inject the authentication data */
        crypto_copyback(crda->crd_flags, crp->crp_buf, crda->crd_inject,
                        axf->blocksize, aalg);

        spin_lock(&swcr_spin);
        --swe->sw_kschedule_refs;
        spin_unlock(&swcr_spin);

        return (0);
}
/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
             caddr_t buf, int flags)
{
        u_int8_t *data, *out;
        struct comp_algo *cxf;
        int adj;
        u_int32_t result;

        cxf = sw->sw_cxf;

        /*
         * We must handle the whole buffer of data in one time
         * then if there is not all the data in the mbuf, we must
         * copy it into a buffer.
         */
        data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
        crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

        if (crd->crd_flags & CRD_F_COMP)
                result = cxf->compress(data, crd->crd_len, &out);
        else
                result = cxf->decompress(data, crd->crd_len, &out);

        kfree(data, M_CRYPTO_DATA);
        if (result == 0)
                return EINVAL;

        /*
         * Copy back the (de)compressed data.  m_copyback is
         * extending the mbuf as necessary.
         */
        sw->sw_size = result;
        /* Check the compressed size when doing compression */
        if (crd->crd_flags & CRD_F_COMP) {
                if (result >= crd->crd_len) {
                        /* Compression was useless, we lost time */
                        kfree(out, M_CRYPTO_DATA);
                        return 0;
                }
        }

        crypto_copyback(flags, buf, crd->crd_skip, result, out);
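
        /*
         * If the output is smaller than the input, trim the excess
         * bytes off the end of the destination buffer.
         */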
        if (result < crd->crd_len) {
                if (flags & CRYPTO_F_IMBUF) {
                        adj = result - crd->crd_len;
                        m_adj((struct mbuf *)buf, adj);
                } else if (flags & CRYPTO_F_IOV) {
                        struct uio *uio = (struct uio *)buf;
                        int ind;

                        adj = crd->crd_len - result;
                        ind = uio->uio_iovcnt - 1;

                        while (adj > 0 && ind >= 0) {
                                if (adj < uio->uio_iov[ind].iov_len) {
                                        uio->uio_iov[ind].iov_len -= adj;
                                        break;
                                }

                                adj -= uio->uio_iov[ind].iov_len;
                                uio->uio_iov[ind].iov_len = 0;
                                ind--;
                                uio->uio_iovcnt--;
                        }
                }
        }
        kfree(out, M_CRYPTO_DATA);
        return 0;
}
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
        struct swcr_data *swd_base;
        struct swcr_data **swd;
        struct swcr_data **oswd;
        struct auth_hash *axf;
        struct enc_xform *txf;
        struct comp_algo *cxf;
        u_int32_t i;
        u_int32_t n;
        int error;

        if (sid == NULL || cri == NULL)
                return EINVAL;

        swd_base = NULL;
        swd = &swd_base;

        while (cri) {
                *swd = kmalloc(sizeof(struct swcr_data),
                               M_CRYPTO_DATA, M_WAITOK | M_ZERO);

                switch (cri->cri_alg) {
                case CRYPTO_DES_CBC:
                        txf = &enc_xform_des;
                        goto enccommon;
                case CRYPTO_3DES_CBC:
                        txf = &enc_xform_3des;
                        goto enccommon;
                case CRYPTO_BLF_CBC:
                        txf = &enc_xform_blf;
                        goto enccommon;
                case CRYPTO_CAST_CBC:
                        txf = &enc_xform_cast5;
                        goto enccommon;
                case CRYPTO_SKIPJACK_CBC:
                        txf = &enc_xform_skipjack;
                        goto enccommon;
                case CRYPTO_RIJNDAEL128_CBC:
                        txf = &enc_xform_rijndael128;
                        goto enccommon;
                case CRYPTO_AES_XTS:
                        txf = &enc_xform_aes_xts;
                        goto enccommon;
                case CRYPTO_AES_CTR:
                        txf = &enc_xform_aes_ctr;
                        goto enccommon;
                case CRYPTO_AES_GCM_16:
                        txf = &enc_xform_aes_gcm;
                        goto enccommon;
                case CRYPTO_AES_GMAC:
                        txf = &enc_xform_aes_gmac;
                        (*swd)->sw_exf = txf;
                        break;
                case CRYPTO_CAMELLIA_CBC:
                        txf = &enc_xform_camellia;
                        goto enccommon;
                case CRYPTO_TWOFISH_CBC:
                        txf = &enc_xform_twofish;
                        goto enccommon;
                case CRYPTO_SERPENT_CBC:
                        txf = &enc_xform_serpent;
                        goto enccommon;
                case CRYPTO_TWOFISH_XTS:
                        txf = &enc_xform_twofish_xts;
                        goto enccommon;
                case CRYPTO_SERPENT_XTS:
                        txf = &enc_xform_serpent_xts;
                        goto enccommon;
                case CRYPTO_NULL_CBC:
                        txf = &enc_xform_null;
                        goto enccommon;
                enccommon:
                        if (cri->cri_key != NULL) {
                                error = txf->setkey(&((*swd)->sw_kschedule),
                                                    cri->cri_key,
                                                    cri->cri_klen / 8);
                                if (error) {
                                        swcr_freesession_slot(&swd_base, 0);
                                        return error;
                                }
                        }
                        (*swd)->sw_exf = txf;
                        break;

                case CRYPTO_MD5_HMAC:
                        axf = &auth_hash_hmac_md5;
                        goto authcommon;
                case CRYPTO_SHA1_HMAC:
                        axf = &auth_hash_hmac_sha1;
                        goto authcommon;
                case CRYPTO_SHA2_256_HMAC:
                        axf = &auth_hash_hmac_sha2_256;
                        goto authcommon;
                case CRYPTO_SHA2_384_HMAC:
                        axf = &auth_hash_hmac_sha2_384;
                        goto authcommon;
                case CRYPTO_SHA2_512_HMAC:
                        axf = &auth_hash_hmac_sha2_512;
                        goto authcommon;
                case CRYPTO_NULL_HMAC:
                        axf = &auth_hash_null;
                        goto authcommon;
                case CRYPTO_RIPEMD160_HMAC:
                        axf = &auth_hash_hmac_ripemd_160;
                authcommon:
                        (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
                                                  M_WAITOK);
                        (*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
                                                  M_WAITOK);

                        if (cri->cri_key != NULL) {
                                swcr_authprepare(axf, *swd, cri->cri_key,
                                                 cri->cri_klen);
                        }

                        (*swd)->sw_mlen = cri->cri_mlen;
                        (*swd)->sw_axf = axf;
                        break;

                case CRYPTO_MD5_KPDK:
                        axf = &auth_hash_key_md5;
                        goto auth2common;
                case CRYPTO_SHA1_KPDK:
                        axf = &auth_hash_key_sha1;
                auth2common:
                        (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
                                                  M_WAITOK);
                        (*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
                                                  M_CRYPTO_DATA, M_WAITOK);

                        /* Store the key so we can "append" it to the payload */
                        if (cri->cri_key != NULL) {
                                swcr_authprepare(axf, *swd, cri->cri_key,
                                                 cri->cri_klen);
                        }

                        (*swd)->sw_mlen = cri->cri_mlen;
                        (*swd)->sw_axf = axf;
                        break;

                case CRYPTO_MD5:
                        axf = &auth_hash_md5;
                        goto auth3common;
                case CRYPTO_SHA1:
                        axf = &auth_hash_sha1;
                auth3common:
                        (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
                                                  M_WAITOK);

                        axf->Init((*swd)->sw_ictx);
                        (*swd)->sw_mlen = cri->cri_mlen;
                        (*swd)->sw_axf = axf;
                        break;

                case CRYPTO_AES_128_GMAC:
                        axf = &auth_hash_gmac_aes_128;
                        goto auth4common;
                case CRYPTO_AES_192_GMAC:
                        axf = &auth_hash_gmac_aes_192;
                        goto auth4common;
                case CRYPTO_AES_256_GMAC:
                        axf = &auth_hash_gmac_aes_256;
                auth4common:
                        (*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
                                                  M_NOWAIT);
                        if ((*swd)->sw_ictx == NULL) {
                                swcr_freesession_slot(&swd_base, 0);
                                return ENOBUFS;
                        }

                        axf->Init((*swd)->sw_ictx);
                        axf->Setkey((*swd)->sw_ictx, cri->cri_key,
                                    cri->cri_klen / 8);
                        (*swd)->sw_axf = axf;
                        break;

                case CRYPTO_DEFLATE_COMP:
                        cxf = &comp_algo_deflate;
                        (*swd)->sw_cxf = cxf;
                        break;
                default:
                        swcr_freesession_slot(&swd_base, 0);
                        return EINVAL;
                }

                (*swd)->sw_alg = cri->cri_alg;
                cri = cri->cri_next;
                swd = &((*swd)->sw_next);
        }

        /*
         * Atomically allocate a session, growing the session array
         * as needed.
         */
        for (;;) {
                spin_lock(&swcr_spin);
                for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
                        if (swcr_sessions[i] == NULL)
                                break;
                }
                if (i < swcr_sesnum) {
                        swcr_sessions[i] = swd_base;
                        swcr_minsesnum = i + 1;
                        spin_unlock(&swcr_spin);
                        break;
                }
                n = swcr_sesnum;
                spin_unlock(&swcr_spin);

                /*
                 * A larger allocation is required, reallocate the array
                 * and replace, checking for SMP races.
                 */
                if (n < CRYPTO_SW_SESSIONS)
                        n = CRYPTO_SW_SESSIONS;
                else
                        n = n * 3 / 2;
                swd = kmalloc(n * sizeof(struct swcr_data *),
                              M_CRYPTO_DATA, M_WAITOK | M_ZERO);

                spin_lock(&swcr_spin);
                if (swcr_sesnum >= n) {
                        spin_unlock(&swcr_spin);
                        kfree(swd, M_CRYPTO_DATA);
                } else if (swcr_sesnum) {
                        bcopy(swcr_sessions, swd,
                              swcr_sesnum * sizeof(struct swcr_data *));
                        oswd = swcr_sessions;
                        swcr_sessions = swd;
                        swcr_sesnum = n;
                        spin_unlock(&swcr_spin);
                        kfree(oswd, M_CRYPTO_DATA);
                } else {
                        swcr_sessions = swd;
                        swcr_sesnum = n;
                        spin_unlock(&swcr_spin);
                }
        }

        *sid = i;
        return 0;
}
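
/*
 * Free a session.
 */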
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
        u_int32_t sid = CRYPTO_SESID2LID(tid);

        if (sid > swcr_sesnum || swcr_sessions == NULL ||
            swcr_sessions[sid] == NULL) {
                return EINVAL;
        }

        /* Silently accept and return */
        if (sid == 0)
                return 0;

        return (swcr_freesession_slot(&swcr_sessions[sid], sid));
}
static int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
        struct enc_xform *txf;
        struct auth_hash *axf;
        struct swcr_data *swd;
        struct swcr_data *swnext;

        /*
         * Protect session detachment with the spinlock.
         */
        spin_lock(&swcr_spin);
        swnext = *swdp;
        *swdp = NULL;
        if (sid && swcr_minsesnum > sid)
                swcr_minsesnum = sid;
        spin_unlock(&swcr_spin);
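
        /*
         * Note: contexts and key schedules contain key material and are
         * explicitly zeroed below before being freed.
         */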
        /*
         * Clean up at our leisure.
         */
        while ((swd = swnext) != NULL) {
                swnext = swd->sw_next;

                swd->sw_next = NULL;

                switch (swd->sw_alg) {
                case CRYPTO_DES_CBC:
                case CRYPTO_3DES_CBC:
                case CRYPTO_BLF_CBC:
                case CRYPTO_CAST_CBC:
                case CRYPTO_SKIPJACK_CBC:
                case CRYPTO_RIJNDAEL128_CBC:
                case CRYPTO_AES_XTS:
                case CRYPTO_AES_CTR:
                case CRYPTO_AES_GCM_16:
                case CRYPTO_AES_GMAC:
                case CRYPTO_CAMELLIA_CBC:
                case CRYPTO_TWOFISH_CBC:
                case CRYPTO_SERPENT_CBC:
                case CRYPTO_TWOFISH_XTS:
                case CRYPTO_SERPENT_XTS:
                case CRYPTO_NULL_CBC:
                        txf = swd->sw_exf;

                        if (swd->sw_kschedule)
                                txf->zerokey(&(swd->sw_kschedule));
                        break;

                case CRYPTO_MD5_HMAC:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_SHA2_256_HMAC:
                case CRYPTO_SHA2_384_HMAC:
                case CRYPTO_SHA2_512_HMAC:
                case CRYPTO_RIPEMD160_HMAC:
                case CRYPTO_NULL_HMAC:
                        axf = swd->sw_axf;

                        if (swd->sw_ictx) {
                                bzero(swd->sw_ictx, axf->ctxsize);
                                kfree(swd->sw_ictx, M_CRYPTO_DATA);
                        }
                        if (swd->sw_octx) {
                                bzero(swd->sw_octx, axf->ctxsize);
                                kfree(swd->sw_octx, M_CRYPTO_DATA);
                        }
                        break;

                case CRYPTO_MD5_KPDK:
                case CRYPTO_SHA1_KPDK:
                        axf = swd->sw_axf;

                        if (swd->sw_ictx) {
                                bzero(swd->sw_ictx, axf->ctxsize);
                                kfree(swd->sw_ictx, M_CRYPTO_DATA);
                        }
                        if (swd->sw_octx) {
                                bzero(swd->sw_octx, swd->sw_klen);
                                kfree(swd->sw_octx, M_CRYPTO_DATA);
                        }
                        break;

                case CRYPTO_AES_128_GMAC:
                case CRYPTO_AES_192_GMAC:
                case CRYPTO_AES_256_GMAC:
                case CRYPTO_MD5:
                case CRYPTO_SHA1:
                        axf = swd->sw_axf;

                        if (swd->sw_ictx) {
                                bzero(swd->sw_ictx, axf->ctxsize);
                                kfree(swd->sw_ictx, M_CRYPTO_DATA);
                        }
                        break;

                case CRYPTO_DEFLATE_COMP:
                        break;
                }
                kfree(swd, M_CRYPTO_DATA);
        }
        return 0;
}
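
/*
 * Note: this is the driver's cryptodev_process entry point.  Requests
 * arrive via the opencrypto dispatch path, and completion (including
 * any error recorded in crp_etype) is reported with crypto_done().
 */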
/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
        struct cryptodesc *crd;
        struct swcr_data *sw;
        u_int32_t lid;

        /* Sanity check */
        if (crp == NULL)
                return EINVAL;

        if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
                crp->crp_etype = EINVAL;
                goto done;
        }

        lid = crp->crp_sid & 0xffffffff;
        if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
                crp->crp_etype = ENOENT;
                goto done;
        }

        /* Go through crypto descriptors, processing as we go */
        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                /*
                 * Find the crypto context.
                 *
                 * XXX Note that the logic here prevents us from having
                 * XXX the same algorithm multiple times in a session
                 * XXX (or rather, we can but it won't give us the right
                 * XXX results). To do that, we'd need some way of differentiating
                 * XXX between the various instances of an algorithm (so we can
                 * XXX locate the correct crypto context).
                 */
                for (sw = swcr_sessions[lid];
                     sw && sw->sw_alg != crd->crd_alg;
                     sw = sw->sw_next)
                        ;

                /* No such context ? */
                if (sw == NULL) {
                        crp->crp_etype = EINVAL;
                        goto done;
                }
                switch (sw->sw_alg) {
                case CRYPTO_DES_CBC:
                case CRYPTO_3DES_CBC:
                case CRYPTO_BLF_CBC:
                case CRYPTO_CAST_CBC:
                case CRYPTO_SKIPJACK_CBC:
                case CRYPTO_RIJNDAEL128_CBC:
                case CRYPTO_AES_XTS:
                case CRYPTO_AES_CTR:
                case CRYPTO_CAMELLIA_CBC:
                case CRYPTO_TWOFISH_CBC:
                case CRYPTO_SERPENT_CBC:
                case CRYPTO_TWOFISH_XTS:
                case CRYPTO_SERPENT_XTS:
                        if ((crp->crp_etype = swcr_encdec(crd, sw,
                            crp->crp_buf, crp->crp_flags)) != 0)
                                goto done;
                        break;
                case CRYPTO_NULL_CBC:
                        crp->crp_etype = 0;
                        break;
                case CRYPTO_MD5_HMAC:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_SHA2_256_HMAC:
                case CRYPTO_SHA2_384_HMAC:
                case CRYPTO_SHA2_512_HMAC:
                case CRYPTO_RIPEMD160_HMAC:
                case CRYPTO_NULL_HMAC:
                case CRYPTO_MD5_KPDK:
                case CRYPTO_SHA1_KPDK:
                case CRYPTO_MD5:
                case CRYPTO_SHA1:
                        if ((crp->crp_etype = swcr_authcompute(crd, sw,
                            crp->crp_buf, crp->crp_flags)) != 0)
                                goto done;
                        break;

                case CRYPTO_AES_GCM_16:
                case CRYPTO_AES_GMAC:
                case CRYPTO_AES_128_GMAC:
                case CRYPTO_AES_192_GMAC:
                case CRYPTO_AES_256_GMAC:
                        crp->crp_etype = swcr_combined(crp);
                        break;

                case CRYPTO_DEFLATE_COMP:
                        if ((crp->crp_etype = swcr_compdec(crd, sw,
                            crp->crp_buf, crp->crp_flags)) != 0)
                                goto done;
                        else
                                crp->crp_olen = (int)sw->sw_size;
                        break;

                default:
                        /* Unknown/unsupported algorithm */
                        crp->crp_etype = EINVAL;
                        goto done;
                }
        }

done:
        crypto_done(crp);
        return 0;
}
static void
swcr_identify(driver_t *drv, device_t parent)
{
        /* NB: order 10 is so we get attached after h/w devices */
        /* XXX: wouldn't bet on this BUS_ADD_CHILD being correct */
        if (device_find_child(parent, "cryptosoft", -1) == NULL &&
            BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
                panic("cryptosoft: could not attach");
}
static int
swcr_probe(device_t dev)
{
        device_set_desc(dev, "software crypto");
        return (0);
}
static int
swcr_attach(device_t dev)
{
        memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
        memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

        swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
                                           CRYPTOCAP_F_SYNC);
        if (swcr_id < 0) {
                device_printf(dev, "cannot initialize!");
                return ENOMEM;
        }
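
        /*
         * Announce every algorithm this driver implements to the
         * opencrypto core, so requests for them can be steered here.
         */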
#define REGISTER(alg) \
        crypto_register(swcr_id, alg, 0, 0)
        REGISTER(CRYPTO_DES_CBC);
        REGISTER(CRYPTO_3DES_CBC);
        REGISTER(CRYPTO_BLF_CBC);
        REGISTER(CRYPTO_CAST_CBC);
        REGISTER(CRYPTO_SKIPJACK_CBC);
        REGISTER(CRYPTO_NULL_CBC);
        REGISTER(CRYPTO_MD5_HMAC);
        REGISTER(CRYPTO_SHA1_HMAC);
        REGISTER(CRYPTO_SHA2_256_HMAC);
        REGISTER(CRYPTO_SHA2_384_HMAC);
        REGISTER(CRYPTO_SHA2_512_HMAC);
        REGISTER(CRYPTO_RIPEMD160_HMAC);
        REGISTER(CRYPTO_NULL_HMAC);
        REGISTER(CRYPTO_MD5_KPDK);
        REGISTER(CRYPTO_SHA1_KPDK);
        REGISTER(CRYPTO_MD5);
        REGISTER(CRYPTO_SHA1);
        REGISTER(CRYPTO_RIJNDAEL128_CBC);
        REGISTER(CRYPTO_AES_XTS);
        REGISTER(CRYPTO_AES_CTR);
        REGISTER(CRYPTO_AES_GCM_16);
        REGISTER(CRYPTO_AES_GMAC);
        REGISTER(CRYPTO_AES_128_GMAC);
        REGISTER(CRYPTO_AES_192_GMAC);
        REGISTER(CRYPTO_AES_256_GMAC);
        REGISTER(CRYPTO_CAMELLIA_CBC);
        REGISTER(CRYPTO_TWOFISH_CBC);
        REGISTER(CRYPTO_SERPENT_CBC);
        REGISTER(CRYPTO_TWOFISH_XTS);
        REGISTER(CRYPTO_SERPENT_XTS);
        REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

        return 0;
}
static int
swcr_detach(device_t dev)
{
        crypto_unregister_all(swcr_id);
        if (swcr_sessions != NULL)
                kfree(swcr_sessions, M_CRYPTO_DATA);
        return 0;
}
static device_method_t swcr_methods[] = {
        DEVMETHOD(device_identify,      swcr_identify),
        DEVMETHOD(device_probe,         swcr_probe),
        DEVMETHOD(device_attach,        swcr_attach),
        DEVMETHOD(device_detach,        swcr_detach),

        DEVMETHOD(cryptodev_newsession, swcr_newsession),
        DEVMETHOD(cryptodev_freesession, swcr_freesession),
        DEVMETHOD(cryptodev_process,    swcr_process),

        DEVMETHOD_END
};
static driver_t swcr_driver = {
        "cryptosoft",
        swcr_methods,
        0,              /* NB: no softc */
};
static devclass_t swcr_devclass;
/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, NULL);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
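
/*
 * Illustrative only -- a minimal sketch of how an in-kernel consumer
 * might reach this driver through the opencrypto API.  Field names and
 * signatures should be checked against crypto(9) for the kernel at
 * hand; the "example_*" identifiers are hypothetical.
 *
 *	struct cryptoini cria;
 *	u_int64_t sid;
 *
 *	bzero(&cria, sizeof(cria));
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = example_keylen * 8;	// key length in bits
 *	cria.cri_key = example_key;
 *	if (crypto_newsession(&sid, &cria, 0) == 0) {
 *		// allocate a request with crypto_getreq(), fill in
 *		// crp_sid, crp_buf, crp_flags and the descriptor chain,
 *		// then submit it with crypto_dispatch(crp); the callback
 *		// fires after swcr_process() calls crypto_done().
 *		crypto_freesession(sid);
 *	}
 */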