/* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.28 2007/10/20 23:23:22 julian Exp $ */
/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the Openbsd Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */
#define	CRYPTO_TIMING			/* enable timing support */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/objcache.h>

#include <sys/thread2.h>

#include <ddb/ddb.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"
/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static	struct lock crypto_drivers_lock;	/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	lockmgr(&crypto_drivers_lock, LK_EXCLUSIVE)
#define	CRYPTO_DRIVER_UNLOCK()	lockmgr(&crypto_drivers_lock, LK_RELEASE)
#define	CRYPTO_DRIVER_ASSERT()	KKASSERT(lockstatus(&crypto_drivers_lock, curthread) != 0)
/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Untagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;			/* (d) device/driver */
	u_int32_t	cc_sessions;		/* (d) # of sessions */
	u_int32_t	cc_koperations;		/* (d) # of asym operations */
	/*
	 * Largest possible operator length (in bits) for each type of
	 * encryption algorithm. XXX not used
	 */
	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];

	int		cc_flags;		/* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	int		cc_kqblocked;		/* (q) asymmetric q blocked */
};
static	struct cryptocap *crypto_drivers = NULL;
static	int crypto_drivers_num = 0;

typedef struct crypto_tdinfo {
	TAILQ_HEAD(,cryptop)	crp_q;		/* request queues */
	TAILQ_HEAD(,cryptkop)	crp_kq;
	thread_t		crp_td;
	struct lock		crp_lock;
	int			crp_sleep;
} *crypto_tdinfo_t;
/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 * A single lock is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
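/* One dispatch context (request queues + worker thread) per cpu. */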
static	struct crypto_tdinfo tdinfo_array[MAXCPU];

#define	CRYPTO_Q_LOCK(tdinfo)	lockmgr(&tdinfo->crp_lock, LK_EXCLUSIVE)
#define	CRYPTO_Q_UNLOCK(tdinfo)	lockmgr(&tdinfo->crp_lock, LK_RELEASE)
/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
 * lock is used to lock access to both queues.  Note that this lock
 * must be separate from the lock on request queues to ensure driver
 * callbacks don't generate lock order reversals.
 */
static	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queues */
static	TAILQ_HEAD(,cryptkop) crp_ret_kq;
static	struct lock crypto_ret_q_lock;
#define	CRYPTO_RETQ_LOCK()	lockmgr(&crypto_ret_q_lock, LK_EXCLUSIVE)
#define	CRYPTO_RETQ_UNLOCK()	lockmgr(&crypto_ret_q_lock, LK_RELEASE)
#define	CRYPTO_RETQ_EMPTY()	(TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
/*
 * Crypto op and descriptor data structures are allocated
 * from separate object caches.
 */
static	struct objcache *cryptop_oc, *cryptodesc_oc;

static	MALLOC_DEFINE(M_CRYPTO_OP, "crypto op", "crypto op");
static	MALLOC_DEFINE(M_CRYPTO_DESC, "crypto desc", "crypto desc");

int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto for asym */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software asym crypto support");
int	crypto_altdispatch = 0;		/* dispatch to alternative cpu */
SYSCTL_INT(_kern, OID_AUTO, cryptoaltdispatch, CTLFLAG_RW,
	   &crypto_altdispatch, 0,
	   "Do not queue crypto op on current cpu");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
static	void crypto_proc(void *dummy);
static	void crypto_ret_proc(void *dummy);
static	struct thread *cryptoretthread;
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int flags);

static	struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	      cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif
static int
crypto_init(void)
{
	crypto_tdinfo_t tdinfo;
	int error;
	int n;

	lockinit(&crypto_drivers_lock, "crypto driver table", 0, LK_CANRECURSE);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);
	lockinit(&crypto_ret_q_lock, "crypto return queues", 0, LK_CANRECURSE);

	cryptop_oc = objcache_create_simple(M_CRYPTO_OP, sizeof(struct cryptop));
	cryptodesc_oc = objcache_create_simple(M_CRYPTO_DESC,
				sizeof(struct cryptodesc));
	if (cryptodesc_oc == NULL || cryptop_oc == NULL) {
		kprintf("crypto_init: cannot setup crypto caches\n");
		error = ENOMEM;
		goto bad;
	}

	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
				 M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	for (n = 0; n < ncpus; ++n) {
		tdinfo = &tdinfo_array[n];
		TAILQ_INIT(&tdinfo->crp_q);
		TAILQ_INIT(&tdinfo->crp_kq);
		lockinit(&tdinfo->crp_lock, "crypto op queues",
			 0, LK_CANRECURSE);
		kthread_create_cpu(crypto_proc, tdinfo, &tdinfo->crp_td,
				   n, "crypto %d", n);
	}
	kthread_create(crypto_ret_proc, NULL,
		       &cryptoretthread, "crypto returns");
	return 0;
bad:
	crypto_destroy();
	return error;
}
/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct thread **tp, void *q)
{
	struct thread *t;

	KKASSERT(lockstatus(&crypto_drivers_lock, curthread) != 0);
	t = *tp;
	*tp = NULL;
	if (t) {
		kprintf("crypto_terminate: start\n");
		wakeup_one(q);
		crit_enter();
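		/*
		 * Interlock on the thread pointer before dropping the
		 * driver lock so the wakeup issued when the thread
		 * exits (see crypto_finis() and exit1()) cannot be
		 * missed.
		 */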
		tsleep_interlock(t, 0);
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		crit_exit();
		tsleep(t, PINTERLOCKED, "crypto_destroy", 0);
		CRYPTO_DRIVER_LOCK();
		kprintf("crypto_terminate: end\n");
	}
}
static void
crypto_destroy(void)
{
	crypto_tdinfo_t tdinfo;
	int n;

	/*
	 * Terminate any crypto threads.
	 */
	CRYPTO_DRIVER_LOCK();
	for (n = 0; n < ncpus; ++n) {
		tdinfo = &tdinfo_array[n];
		crypto_terminate(&tdinfo->crp_td, &tdinfo->crp_q);
		lockuninit(&tdinfo->crp_lock);
	}
	crypto_terminate(&cryptoretthread, &crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	if (crypto_drivers != NULL)
		kfree(crypto_drivers, M_CRYPTO_DATA);

	if (cryptodesc_oc != NULL)
		objcache_destroy(cryptodesc_oc);
	if (cryptop_oc != NULL)
		objcache_destroy(cryptop_oc);
	lockuninit(&crypto_ret_q_lock);
	lockuninit(&crypto_drivers_lock);
}
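
/*
 * Map a driver id (hid) to its cryptocap entry; returns NULL if the
 * driver table is gone or the id is out of range.
 */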
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}
/*
 * Compare a driver's list of supported algorithms against another
 * list; return non-zero if all algorithms are supported.
 */
static int
driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
{
	const struct cryptoini *cr;

	/* See if all the algorithms are supported. */
	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0)
			return 0;
	return 1;
}
/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need. If there
 * are multiple drivers we choose the driver with the fewest active
 * sessions.  We prefer hardware-backed drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver(const struct cryptoini *cri, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (driver_suitable(cap, cri)) {
			if (best == NULL ||
			    cap->cc_sessions < best->cc_sessions)
				best = cap;
		}
	}
	if (best != NULL)
		return best;
	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}
/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
{
	struct cryptocap *cap;
	u_int32_t hid, lid;
	int err;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && !driver_suitable(cap, cri))
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(cri, crid);
		/*
		 * if NULL then can't do everything in one session.
		 * XXX Fix this. We need to inject a "virtual" session
		 * XXX layer right about here.
		 */
	}
	if (cap != NULL) {
		/* Call the driver initialization routine. */
		hid = cap - crypto_drivers;
		lid = hid;		/* Pass the driver ID. */
		err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
		if (err == 0) {
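			/*
			 * Compose the 64-bit session id: the driver's
			 * capability flags and table index (hid) go in
			 * the upper 32 bits, the driver-local session
			 * id (lid) in the lower 32 bits.
			 */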
			(*sid) = (cap->cc_flags & 0xff000000)
			       | (hid & 0x00ffffff);
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			cap->cc_sessions++;
		}
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();
	return err;
}
static void
crypto_remove(struct cryptocap *cap)
{
	KKASSERT(lockstatus(&crypto_drivers_lock, curthread) != 0);
	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
		bzero(cap, sizeof(*cap));
}
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int err;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine the driver id from the session id. */
	hid = CRYPTO_SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}
	cap = &crypto_drivers[hid];

	if (cap->cc_sessions)
		cap->cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		crypto_remove(cap);

done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}
/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(device_t dev, int flags)
{
	struct cryptocap *newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		kprintf("%s: no flags specified when registering driver\n",
			device_get_nameunit(dev));
		return -1;
	}

	CRYPTO_DRIVER_LOCK();

	for (i = 0; i < crypto_drivers_num; i++) {
		if (crypto_drivers[i].cc_dev == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
			break;
		}
	}

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			kprintf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = kmalloc(2 * crypto_drivers_num *
				 sizeof(struct cryptocap),
				 M_CRYPTO_DATA, M_WAITOK|M_ZERO);

		bcopy(crypto_drivers, newdrv,
		      crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		kfree(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
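	/*
	 * Seed cc_sessions to 1 to mark the slot as in use until the
	 * driver registers its first algorithm; crypto_register()
	 * clears it again (see the "Unmark" there).
	 */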
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_dev = dev;
	crypto_drivers[i].cc_flags = flags;
	if (bootverbose)
		kprintf("crypto: assign %s driver id %u, flags %u\n",
			device_get_nameunit(dev), i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}
/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_num; i++) {
		device_t dev = crypto_drivers[i].cc_dev;
		if (dev == NULL ||
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;
		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
		    strncmp(match, device_get_name(dev), len) == 0)
			break;
	}
	CRYPTO_DRIVER_UNLOCK();
	return i < crypto_drivers_num ? i : -1;
}
/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_dev : NULL;
}
/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_flags : 0;
}
/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			kprintf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);

		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
		u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			kprintf("crypto: %s registers alg %u flags %u maxoplen %u\n"
				, device_get_nameunit(cap->cc_dev)
				, alg
				, flags
				, maxoplen
			);
		cap->cc_sessions = 0;		/* Unmark */
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
static void
driver_finis(struct cryptocap *cap)
{
	u_int32_t ses, kops;

	CRYPTO_DRIVER_ASSERT();

	ses = cap->cc_sessions;
	kops = cap->cc_koperations;
	bzero(cap, sizeof(*cap));
	if (ses != 0 || kops != 0) {
		/*
		 * If there are pending sessions,
		 * just mark as invalid.
		 */
		cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
		cap->cc_sessions = ses;
		cap->cc_koperations = kops;
	}
}
/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	struct cryptocap *cap;
	int i, err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) {
			if (cap->cc_alg[i] != 0)
				break;
		}

		if (i == CRYPTO_ALGORITHM_MAX + 1)
			driver_finis(cap);
		err = 0;
	} else {
		err = EINVAL;
	}
	CRYPTO_DRIVER_UNLOCK();

	return err;
}
/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		driver_finis(cap);
		err = 0;
	} else {
		err = EINVAL;
	}
	CRYPTO_DRIVER_UNLOCK();

	return err;
}
/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	crypto_tdinfo_t tdinfo;
	struct cryptocap *cap;
	int err;
	int n;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (what & CRYPTO_ASYMQ)
			cap->cc_kqblocked = 0;
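		/*
		 * Kick every per-cpu dispatch thread; requests for this
		 * driver may be queued on any of them.
		 */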
		for (n = 0; n < ncpus; ++n) {
			tdinfo = &tdinfo_array[n];
			CRYPTO_Q_LOCK(tdinfo);
			if (tdinfo->crp_sleep)
				wakeup_one(&tdinfo->crp_q);
			CRYPTO_Q_UNLOCK(tdinfo);
		}
		err = 0;
	} else {
		err = EINVAL;
	}
	CRYPTO_DRIVER_UNLOCK();

	return err;
}
static volatile int dispatch_rover;

/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	crypto_tdinfo_t tdinfo;
	struct cryptocap *cap;
	u_int32_t hid;
	int result;
	int n;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	hid = CRYPTO_SESID2HID(crp->crp_sid);

	/*
	 * Dispatch the crypto op directly to the driver if the caller
	 * marked the request to be processed immediately or this is
	 * a synchronous callback chain occurring from within a crypto
	 * processing thread.
	 *
	 * Fall through to queueing if the driver is blocked.
	 */
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0 ||
	    curthread->td_type == TD_TYPE_CRYPTO) {
		cap = crypto_checkdriver(hid);
		/* The driver cannot disappear when there is an active session. */
		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
		if (!cap->cc_qblocked) {
			result = crypto_invoke(cap, crp, 0);
			if (result != ERESTART)
				return (result);
			/*
			 * The driver ran out of resources, put the request on
			 * the queue.
			 */
		}
	}

	/*
	 * Dispatch to a cpu for action if possible.  With
	 * crypto_altdispatch set, prefer a cpu other than the
	 * current one.
	 */
	if (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SMP) {
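		/*
		 * Round-robin cpu selection: atomically advance a
		 * free-running rover; the 8-bit mask just bounds the
		 * value before the modulo by ncpus below.
		 */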
		n = atomic_fetchadd_int(&dispatch_rover, 1) & 255;
		if (crypto_altdispatch && mycpu->gd_cpuid == n)
			++n;
		n = n % ncpus;
	} else {
		n = 0;
	}
	tdinfo = &tdinfo_array[n];

	CRYPTO_Q_LOCK(tdinfo);
	TAILQ_INSERT_TAIL(&tdinfo->crp_q, crp, crp_next);
	if (tdinfo->crp_sleep)
		wakeup_one(&tdinfo->crp_q);
	CRYPTO_Q_UNLOCK(tdinfo);
	return 0;
}
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	crypto_tdinfo_t tdinfo;
	int error;
	int n;

	cryptostats.cs_kops++;

#if 0
	/* not sure how to test F_SMP here */
	n = atomic_fetchadd_int(&dispatch_rover, 1) & 255;
	n = n % ncpus;
#endif
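	/* For now all asymmetric ops funnel through queue 0. */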
	n = 0;
	tdinfo = &tdinfo_array[n];

	error = crypto_kinvoke(krp, krp->krp_crid);

	if (error == ERESTART) {
		CRYPTO_Q_LOCK(tdinfo);
		TAILQ_INSERT_TAIL(&tdinfo->crp_kq, krp, krp_next);
		if (tdinfo->crp_sleep)
			wakeup_one(&tdinfo->crp_q);
		CRYPTO_Q_UNLOCK(tdinfo);
		error = 0;
	}

	return error;
}
/*
 * Verify a driver is suitable for the specified operation.
 */
static __inline int
kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
{
	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
}
/*
 * Select a driver for an asym operation.  The driver must
 * support the necessary algorithm.  The caller can constrain
 * which device is selected with the flags parameter.  The
 * algorithm we use here is pretty stupid; just use the first
 * driver that supports the algorithms we need. If there are
 * multiple suitable drivers we choose the driver with the
 * fewest active operations.  We prefer hardware-backed
 * drivers to software ones when either may be used.
 */
static struct cryptocap *
crypto_select_kdriver(const struct cryptkop *krp, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify the algorithm is supported. */
		if (kdriver_suitable(cap, krp)) {
			if (best == NULL ||
			    cap->cc_koperations < best->cc_koperations)
				best = cap;
		}
	}
	if (best != NULL)
		return best;
	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}
/*
 * Dispatch an asymmetric crypto request.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int crid)
{
	struct cryptocap *cap = NULL;
	int error;

	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
	KASSERT(krp->krp_callback != NULL,
		("%s: krp->krp_callback == NULL", __func__));

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		cap = crypto_checkdriver(crid);
		if (cap != NULL) {
			/*
			 * Driver present, it must support the necessary
			 * algorithm and, if s/w drivers are excluded,
			 * it must be registered as hardware-backed.
			 */
			if (!kdriver_suitable(cap, krp) ||
			    (!crypto_devallowsoft &&
			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
				cap = NULL;
		}
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
			crid &= ~CRYPTOCAP_F_SOFTWARE;
		cap = crypto_select_kdriver(krp, crid);
	}
	if (cap != NULL && !cap->cc_kqblocked) {
		krp->krp_hid = cap - crypto_drivers;
		cap->cc_koperations++;
		CRYPTO_DRIVER_UNLOCK();
		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
		CRYPTO_DRIVER_LOCK();
		if (error == ERESTART) {
			cap->cc_koperations--;
			CRYPTO_DRIVER_UNLOCK();
			return (error);
		}
	} else {
		/*
		 * NB: cap is !NULL if device is blocked; in
		 * that case return ERESTART so the operation
		 * is resubmitted if possible.
		 */
		error = (cap == NULL) ? ENODEV : ERESTART;
	}
	CRYPTO_DRIVER_UNLOCK();

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif
/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{
	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
		("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 * session?
		 */
		crypto_freesession(crp->crp_sid);

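		/*
		 * Rebuild a cryptoini chain from the request's
		 * descriptors so a replacement session can be created.
		 */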
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		/* XXX propagate flags from initial session? */
		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
	}
}
/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;
#ifdef DIAGNOSTIC
	crypto_tdinfo_t tdinfo;
	struct cryptop *crp2;
	int n;
#endif

	if (crp == NULL)
		return;

#ifdef DIAGNOSTIC
	for (n = 0; n < ncpus; ++n) {
		tdinfo = &tdinfo_array[n];

		CRYPTO_Q_LOCK(tdinfo);
		TAILQ_FOREACH(crp2, &tdinfo->crp_q, crp_next) {
			KASSERT(crp2 != crp,
				("Freeing cryptop from the crypto queue (%p).",
				 crp));
		}
		CRYPTO_Q_UNLOCK(tdinfo);
	}
	CRYPTO_RETQ_LOCK();
	TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
		KASSERT(crp2 != crp,
			("Freeing cryptop from the return queue (%p).",
			 crp));
	}
	CRYPTO_RETQ_UNLOCK();
#endif

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		objcache_put(cryptodesc_oc, crd);
	}
	objcache_put(cryptop_oc, crp);
}
/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;

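	/*
	 * NB: allocations use M_WAITOK and are not expected to fail;
	 * the NULL checks below are defensive.
	 */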
	crp = objcache_get(cryptop_oc, M_WAITOK);
	if (crp != NULL) {
		bzero(crp, sizeof (*crp));
		while (num--) {
			crd = objcache_get(cryptodesc_oc, M_WAITOK);
			if (crd == NULL) {
				crypto_freereq(crp);
				return NULL;
			}
			bzero(crd, sizeof (*crd));

			crd->crd_next = crp->crp_desc;
			crp->crp_desc = crd;
		}
	}
	return crp;
}
/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETQ_LOCK();
		if (CRYPTO_RETQ_EMPTY())
			wakeup_one(&crp_ret_q);	/* shared wait channel */
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		CRYPTO_RETQ_UNLOCK();
	}
}
/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	struct cryptocap *cap;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	CRYPTO_DRIVER_LOCK();
	/* XXX: What if driver is loaded in the meantime? */
	if (krp->krp_hid < crypto_drivers_num) {
		cap = &crypto_drivers[krp->krp_hid];
		cap->cc_koperations--;
		KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
		if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_remove(cap);
	}
	CRYPTO_DRIVER_UNLOCK();
	CRYPTO_RETQ_LOCK();
	if (CRYPTO_RETQ_EMPTY())
		wakeup_one(&crp_ret_q);		/* shared wait channel */
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	CRYPTO_RETQ_UNLOCK();
}
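
/*
 * Report which asymmetric (key) algorithms are supported by the
 * registered drivers, as a bitmask indexed by CRK_* algorithm number.
 */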
int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;

	CRYPTO_DRIVER_LOCK();
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		const struct cryptocap *cap = &crypto_drivers[hid];

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft) {
			continue;
		}
		for (kalg = 0; kalg <= CRK_ALGORITHM_MAX; kalg++)
			if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
				feat |= 1 << kalg;
	}
	CRYPTO_DRIVER_UNLOCK();
	*featp = feat;
	return (0);
}
/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
 * we don't do the wakeup before they're waiting.  There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	kthread_exit();
}
/*
 * Crypto thread, dispatches crypto requests.
 *
 * MPSAFE
 */
static void
crypto_proc(void *arg)
{
	crypto_tdinfo_t tdinfo = arg;
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	u_int32_t hid;
	int result, hint;

	CRYPTO_Q_LOCK(tdinfo);

	curthread->td_type = TD_TYPE_CRYPTO;

	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &tdinfo->crp_q, crp_next) {
			hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			/*
			 * The driver cannot disappear when there is
			 * an active session.
			 */
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			if (cap == NULL || cap->cc_dev == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless of whether it's for the
					 * same driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning in case more are queued */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&tdinfo->crp_q, submit, crp_next);
			hid = CRYPTO_SESID2HID(submit->crp_sid);
			cap = crypto_checkdriver(hid);
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));

			CRYPTO_Q_UNLOCK(tdinfo);
			result = crypto_invoke(cap, submit, hint);
			CRYPTO_Q_LOCK(tdinfo);

			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&tdinfo->crp_q,
						  submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}
		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &tdinfo->crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_dev == NULL) {
				/*
				 * Operation needs to be migrated, invalidate
				 * the assigned device so it will reselect a
				 * new one below.  Propagate the original
				 * crid selection flags if supplied.
				 */
				krp->krp_hid = krp->krp_crid &
				    (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
				if (krp->krp_hid == 0)
					krp->krp_hid =
					    CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&tdinfo->crp_kq, krp, krp_next);

			CRYPTO_Q_UNLOCK(tdinfo);
			result = crypto_kinvoke(krp, krp->krp_hid);
			CRYPTO_Q_LOCK(tdinfo);

			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&tdinfo->crp_kq,
						  krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}

		if (submit == NULL && krp == NULL) {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front. It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			tdinfo->crp_sleep = 1;
			lksleep(&tdinfo->crp_q, &tdinfo->crp_lock,
				0, "crypto_wait", 0);
			tdinfo->crp_sleep = 0;
			if (tdinfo->crp_td == NULL)
				break;
			cryptostats.cs_intrs++;
		}
	}
	CRYPTO_Q_UNLOCK(tdinfo);

	crypto_finis(&tdinfo->crp_q);
}
/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 *
 * MPSAFE
 */
static void
crypto_ret_proc(void *dummy __unused)
{
	struct cryptop *crpt;
	struct cryptkop *krpt;

	CRYPTO_RETQ_LOCK();
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&crp_ret_q);
		if (crpt != NULL)
			TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);

		krpt = TAILQ_FIRST(&crp_ret_kq);
		if (krpt != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);

		if (crpt != NULL || krpt != NULL) {
			CRYPTO_RETQ_UNLOCK();
			/*
			 * Run callbacks unlocked.
			 */
			if (crpt != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crpt->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crpt->crp_callback(crpt);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crpt->crp_callback(crpt);
			}
			if (krpt != NULL)
				krpt->krp_callback(krpt);
			CRYPTO_RETQ_LOCK();
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			lksleep(&crp_ret_q, &crypto_ret_q_lock,
				0, "crypto_ret_wait", 0);
			if (cryptoretthread == NULL)
				break;
			cryptostats.cs_rets++;
		}
	}
	CRYPTO_RETQ_UNLOCK();

	crypto_finis(&crp_ret_q);
}
#ifdef DDB
static void
db_show_drivers(void)
{
	int hid;

	db_printf("%12s %4s %4s %8s %2s %2s\n"
		, "Device"
		, "Ses"
		, "Kops"
		, "Flags"
		, "QB"
		, "KB"
	);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		const struct cryptocap *cap = &crypto_drivers[hid];
		if (cap->cc_dev == NULL)
			continue;
		db_printf("%-12s %4u %4u %08x %2u %2u\n"
			, device_get_nameunit(cap->cc_dev)
			, cap->cc_sessions
			, cap->cc_koperations
			, cap->cc_flags
			, cap->cc_qblocked
			, cap->cc_kqblocked
		);
	}
}
DB_SHOW_COMMAND(crypto, db_show_crypto)
{
	crypto_tdinfo_t tdinfo;
	struct cryptop *crp;
	int n;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
	    "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
	    "Desc", "Callback");

	for (n = 0; n < ncpus; ++n) {
		tdinfo = &tdinfo_array[n];

		TAILQ_FOREACH(crp, &tdinfo->crp_q, crp_next) {
			db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
				, (int) CRYPTO_SESID2HID(crp->crp_sid)
				, (int) CRYPTO_SESID2CAPS(crp->crp_sid)
				, crp->crp_ilen, crp->crp_olen
				, crp->crp_etype
				, crp->crp_flags
				, crp->crp_desc
				, crp->crp_callback
			);
		}
	}
	if (!TAILQ_EMPTY(&crp_ret_q)) {
		db_printf("\n%4s %4s %4s %8s\n",
		    "HID", "Etype", "Flags", "Callback");
		TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
			db_printf("%4u %4u %04x %8p\n"
				, (int) CRYPTO_SESID2HID(crp->crp_sid)
				, crp->crp_etype
				, crp->crp_flags
				, crp->crp_callback
			);
		}
	}
}
DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
{
	crypto_tdinfo_t tdinfo;
	struct cryptkop *krp;
	int n;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
	    "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");

	for (n = 0; n < ncpus; ++n) {
		tdinfo = &tdinfo_array[n];

		TAILQ_FOREACH(krp, &tdinfo->crp_kq, krp_next) {
			db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
				, krp->krp_op
				, krp->krp_status
				, krp->krp_iparams, krp->krp_oparams
				, krp->krp_crid, krp->krp_hid
				, krp->krp_callback
			);
		}
	}
	/* NB: check the key-op return queue, which is what is walked below */
	if (!TAILQ_EMPTY(&crp_ret_kq)) {
		db_printf("%4s %5s %8s %4s %8s\n",
		    "Op", "Status", "CRID", "HID", "Callback");
		TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
			db_printf("%4u %5u %08x %4u %8p\n"
				, krp->krp_op
				, krp->krp_status
				, krp->krp_crid, krp->krp_hid
				, krp->krp_callback
			);
		}
	}
}
#endif
int crypto_modevent(module_t mod, int type, void *unused);

/*
 * Initialization code, both for static and dynamic loading.
 * Note this is not invoked with the usual MODULE_DECLARE
 * mechanism but instead is listed as a dependency by the
 * cryptosoft driver.  This guarantees proper ordering of
 * calls on module load/unload.
 */
int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			kprintf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		return 0;
	}
	return error;
}

MODULE_VERSION(crypto, 1);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);