sys/opencrypto/cryptosoft.c
/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin, "swcr_spin");

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
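
/*
 * hmac_ipad_buffer and hmac_opad_buffer are filled with HMAC_IPAD_VAL and
 * HMAC_OPAD_VAL in swcr_attach() and are used by swcr_authprepare() to pad
 * keys shorter than the hash block size when precomputing the inner and
 * outer HMAC contexts.
 */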

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_combined(struct cryptop *);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);
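
/*
 * Software sessions live in the swcr_sessions[] array, indexed by the low
 * 32 bits of the 64-bit session id handed back to the framework (see
 * CRYPTO_SESID2LID in swcr_freesession() and the lid extraction in
 * swcr_process()).  swcr_spin protects the array and the per-session key
 * schedule reference counts.
 */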

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
	    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	u_int8_t *okschedule;
	struct enc_xform *exf;
	int i, k, j, blks, ivlen;
	int error;
	int explicit_kschedule;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}
	ivp = iv;

	/*
	 * The semantics are seriously broken because the session key
	 * storage was never designed for concurrent ops.
	 */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		kschedule = NULL;
		explicit_kschedule = 1;
		error = exf->setkey(&kschedule,
				    crd->crd_key, crd->crd_klen / 8);
		if (error)
			goto done;
	} else {
		spin_lock(&swcr_spin);
		kschedule = sw->sw_kschedule;
		++sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
		explicit_kschedule = 0;
	}

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(kschedule, iv);

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     blk, iv);
					} else {
						exf->decrypt(kschedule,
							     blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				     m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     idat, iv);
					} else {
						exf->decrypt(kschedule,
							     idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
		error = 0;	/* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     blk, iv);
					} else {
						exf->decrypt(kschedule,
							     blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     idat, iv);
					} else {
						exf->decrypt(kschedule,
							     idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}
		error = 0;	/* Done with iovec encryption/decryption */
	} else {
		/*
		 * contiguous buffer
		 */
		if (exf->reinit) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(kschedule, buf + i, iv);
				} else {
					exf->decrypt(kschedule, buf + i, iv);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(kschedule, buf + i, iv);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			     i >= crd->crd_skip; i -= blks) {
				exf->decrypt(kschedule, buf + i, iv);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}
		error = 0;	/* Done w/contiguous buffer encrypt/decrypt */
	}
done:
	/*
	 * Cleanup - explicitly replace the session key if requested
	 * (horrible semantics for concurrent operation)
	 */
	if (explicit_kschedule) {
		spin_lock(&swcr_spin);
		if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
			okschedule = sw->sw_kschedule;
			sw->sw_kschedule = kschedule;
		} else {
			okschedule = NULL;
		}
		spin_unlock(&swcr_spin);
		if (okschedule)
			exf->zerokey(&okschedule);
	} else {
		spin_lock(&swcr_spin);
		--sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
	}
	return error;
}
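
/*
 * Note on the CBC handling above: for transforms without a reinit hook,
 * encryption XORs each plaintext block with the previous ciphertext block
 * (or the IV for the first block) before calling exf->encrypt(), while
 * decryption first saves the incoming ciphertext block in the iv/iv2
 * ping-pong buffers so it can be XOR'd into the following block after
 * exf->decrypt() runs.
 */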

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
		 int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
			"doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
		 int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
			sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

/*
 * Apply a combined encryption-authentication transformation
 */
static int
swcr_combined(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[HASH_MAX_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	uint8_t *kschedule;
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz, ivlen, len;

	blksz = 0;
	ivlen = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crde->crd_flags, buf, crde->crd_inject,
					ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			/* Get IV off buf */
			crypto_copydata(crde->crd_flags, buf, crde->crd_inject,
					ivlen, iv);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		crypto_copydata(crde->crd_flags, buf, crda->crd_skip + i, len,
				blk);
		axf->Update(&ctx, blk, len);
	}

	spin_lock(&swcr_spin);
	kschedule = sw->sw_kschedule;
	++sw->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	if (exf->reinit)
		exf->reinit(kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crde->crd_flags, buf, crde->crd_skip + i, len,
				blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, blk, iv);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(kschedule, blk, iv);
		}
		crypto_copyback(crde->crd_flags, buf, crde->crd_skip + i, len,
				blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	crypto_copyback(crda->crd_flags, crp->crp_buf, crda->crd_inject,
			axf->blocksize, aalg);

	spin_lock(&swcr_spin);
	--sw->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	return (0);
}
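
/*
 * The GMAC "length block" assembled above packs the AAD length and the
 * ciphertext length, in bits and in big-endian byte order, into the second
 * and fourth 32-bit words of a final 16-byte block, matching the GCM/GMAC
 * finalization step.
 */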

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
	     caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}
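
/*
 * Note that when compression does not shrink the data (result >= crd_len),
 * the routine above frees the output buffer and returns success without
 * touching the original buffer; callers can detect this case by comparing
 * crp_olen (set from sw_size in swcr_process()) against the original length.
 */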

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data *swd_base;
	struct swcr_data **swd;
	struct swcr_data **oswd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	u_int32_t n;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	swd_base = NULL;
	swd = &swd_base;

	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &enc_xform_aes_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_TWOFISH_CBC:
			txf = &enc_xform_twofish;
			goto enccommon;
		case CRYPTO_SERPENT_CBC:
			txf = &enc_xform_serpent;
			goto enccommon;
		case CRYPTO_TWOFISH_XTS:
			txf = &enc_xform_twofish_xts;
			goto enccommon;
		case CRYPTO_SERPENT_XTS:
			txf = &enc_xform_serpent_xts;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
						    cri->cri_key,
						    cri->cri_klen / 8);
				if (error) {
					swcr_freesession_slot(&swd_base, 0);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
						  M_CRYPTO_DATA, M_WAITOK);

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
				    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_slot(&swd_base, 0);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}

	for (;;) {
		/*
		 * Atomically allocate a session
		 */
		spin_lock(&swcr_spin);
		for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
			if (swcr_sessions[i] == NULL)
				break;
		}
		if (i < swcr_sesnum) {
			swcr_sessions[i] = swd_base;
			swcr_minsesnum = i + 1;
			spin_unlock(&swcr_spin);
			break;
		}
		n = swcr_sesnum;
		spin_unlock(&swcr_spin);

		/*
		 * A larger allocation is required, reallocate the array
		 * and replace, checking for SMP races.
		 */
		if (n < CRYPTO_SW_SESSIONS)
			n = CRYPTO_SW_SESSIONS;
		else
			n = n * 3 / 2;
		swd = kmalloc(n * sizeof(struct swcr_data *),
			      M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		spin_lock(&swcr_spin);
		if (swcr_sesnum >= n) {
			spin_unlock(&swcr_spin);
			kfree(swd, M_CRYPTO_DATA);
		} else if (swcr_sesnum) {
			bcopy(swcr_sessions, swd,
			      swcr_sesnum * sizeof(struct swcr_data *));
			oswd = swcr_sessions;
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
			kfree(oswd, M_CRYPTO_DATA);
		} else {
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
		}
	}

	*sid = i;
	return 0;
}
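
/*
 * The session allocation loop above drops swcr_spin before growing the
 * swcr_sessions[] array and re-checks swcr_sesnum after reacquiring it, so
 * a concurrent grower that lost the race simply frees its new array; the
 * loop then retries until a free slot is found.
 */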

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL) {
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	return(swcr_freesession_slot(&swcr_sessions[sid], sid));
}

static
int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct swcr_data *swd;
	struct swcr_data *swnext;

	/*
	 * Protect session detachment with the spinlock.
	 */
	spin_lock(&swcr_spin);
	swnext = *swdp;
	*swdp = NULL;
	if (sid && swcr_minsesnum > sid)
		swcr_minsesnum = sid;
	spin_unlock(&swcr_spin);

	/*
	 * Clean up at our leisure.
	 */
	while ((swd = swnext) != NULL) {
		swnext = swd->sw_next;

		swd->sw_next = NULL;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			break;
		}

		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			crp->crp_etype = swcr_combined(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	lwkt_yield();
	return 0;
}
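
/*
 * swcr_process() walks the descriptor chain one cryptodesc at a time; the
 * AES-GCM/GMAC cases bail out to swcr_combined(), which consumes the whole
 * chain (cipher plus MAC descriptors) in a single pass.
 */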

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	/* XXX: wouldn't bet about this BUS_ADD_CHILD correctness */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
					   CRYPTOCAP_F_SYNC |
					   CRYPTOCAP_F_SMP);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!");
		return ENOMEM;
	}

#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_AES_GCM_16);
	REGISTER(CRYPTO_AES_GMAC);
	REGISTER(CRYPTO_AES_128_GMAC);
	REGISTER(CRYPTO_AES_192_GMAC);
	REGISTER(CRYPTO_AES_256_GMAC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_TWOFISH_CBC);
	REGISTER(CRYPTO_SERPENT_CBC);
	REGISTER(CRYPTO_TWOFISH_XTS);
	REGISTER(CRYPTO_SERPENT_XTS);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}
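
/*
 * A minimal sketch of how a kernel consumer reaches this driver through the
 * opencrypto framework (field and argument details elided; see crypto(9)
 * for the authoritative API):
 *
 *	struct cryptoini cri;		// e.g. cri_alg = CRYPTO_AES_CTR,
 *					//      cri_key / cri_klen filled in
 *	u_int64_t sid;
 *	crypto_newsession(&sid, &cri, 0);	// may land on swcr_newsession()
 *	crp = crypto_getreq(1);			// one cryptodesc
 *	// fill crp->crp_sid, crp->crp_buf, crp->crp_desc, crp->crp_callback
 *	crypto_dispatch(crp);			// eventually calls swcr_process()
 */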

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		kfree(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	DEVMETHOD_END
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 *     get the necessary ordering when built as a loadable
 *     module.  This is required because we bundle the crypto
 *     module code together with the cryptosoft driver (otherwise
 *     normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, NULL);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);