/* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $ */
/* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
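/*
 * The two pad buffers are filled with HMAC_IPAD_VAL and HMAC_OPAD_VAL in
 * swcr_attach() and are used by swcr_authprepare() to pad keys shorter than
 * the hash block size out to axf->blocksize, per the HMAC construction.
 */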
static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;
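	/*
	 * IV selection: with CRD_F_IV_EXPLICIT the caller supplies the IV in
	 * crd->crd_iv; otherwise it is read from (decrypt) or written to
	 * (encrypt, unless CRD_F_IV_PRESENT says it is already there) the
	 * data buffer at offset crd->crd_inject.
	 */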
	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);
	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}
	ivp = iv;
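	/*
	 * The payload may live in an mbuf chain (CRYPTO_F_IMBUF), a uio of
	 * iovecs (CRYPTO_F_IOV), or a plain contiguous buffer; each layout is
	 * handled separately below.  ivp always points at the block to XOR
	 * with next: the IV for the first block, then the previous ciphertext.
	 */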
	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}

		return 0; /* Done with iovec encryption/decryption */
	} else {	/* contiguous buffer */
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0; /* Done with contiguous buffer encryption/decryption */
	}
}
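/*
 * CBC recap for swcr_encdec() above (informal): ciphertext block
 * C_i = E_K(P_i XOR C_{i-1}) with C_0 = IV, and plaintext
 * P_i = D_K(C_i) XOR C_{i-1}.  Encryption must walk forward because each
 * block depends on the previous ciphertext; in the contiguous case
 * decryption walks backward so the previous ciphertext block is still
 * intact in the buffer when it is needed for the XOR.
 */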
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
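	/*
	 * Net effect of the HMAC cases: sw_ictx holds the hash state after
	 * absorbing (key XOR ipad) padded to one block, and sw_octx the state
	 * after (key XOR opad), so per-request HMAC computation can start
	 * from these precomputed states instead of re-hashing the key.  The
	 * final XOR with HMAC_OPAD_VAL restores the caller's key bytes.
	 */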
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}
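	/*
	 * For the HMAC cases this yields H((K ^ opad) || H((K ^ ipad) || data)):
	 * the inner digest of the payload is finalized into aalg, then run
	 * through the precomputed outer context and finalized again.
	 */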
	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data in one go; if the data is
	 * not already contiguous (e.g. an mbuf chain), copy it into a
	 * temporary buffer first.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the mbuf
	 * chain as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}
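/*
 * Note on sizing: if compression would grow the data (result > crd_len) the
 * compressed output is discarded and the request completes with the payload
 * unchanged; if it shrinks the data, the mbuf chain is trimmed with m_adj()
 * or the trailing iovec lengths are reduced to match the new size.
 */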
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			kfree(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}
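	/*
	 * The session table starts with CRYPTO_SW_SESSIONS slots (index 0 is
	 * reserved) and doubles whenever it fills; existing entries are
	 * copied into the new array and the old one is freed.
	 */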
	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(dev, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession(dev, i);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;
		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;
		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(dev, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
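/*
 * Each cryptoini in the cri chain gets its own swcr_data, linked through
 * sw_next, so one session can combine e.g. a cipher with an authentication
 * transform; swcr_process() walks this list to find the context that matches
 * each descriptor's algorithm.
 */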
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		//FREE(swd, M_CRYPTO_DATA);
		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}
/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	/* XXX: wouldn't bet on the correctness of this BUS_ADD_CHILD usage */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!");
		return ENXIO;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}
static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		kfree(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{ 0, 0 }
};
static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
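/*
 * Rough usage sketch (not part of this file, details hedged): a kernel
 * consumer such as IPsec builds a struct cryptoini chain describing the
 * transforms it wants (e.g. a CRYPTO_RIJNDAEL128_CBC entry linked via
 * cri_next to a CRYPTO_SHA1_HMAC entry), obtains a session handle with
 * crypto_newsession(), and then submits struct cryptop requests that
 * reference that session; the opencrypto framework dispatches them to
 * swcr_process() above whenever this software driver backs the session.
 * See opencrypto/cryptodev.h for the exact structures and entry points.
 */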