/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * Copyright (C) 2013-2014
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
static void undo_upreq(struct lock *lkp);
#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
	    sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
	    sysctl_cancel_test, "I", "test cancelable locks");

#endif
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
#define COUNT(td, x) (td)->td_locks += (x)
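
/*
 * Example usage (illustrative sketch only, not part of the original
 * file; "foo_lock" is a hypothetical lock):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, "foolck", 0, 0);
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);	(acquire exclusively)
 *	...modify the protected data...
 *	lockmgr(&foo_lock, LK_RELEASE);
 *
 *	if (lockmgr(&foo_lock, LK_SHARED | LK_NOWAIT) == 0) {
 *		...read the protected data...
 *		lockmgr(&foo_lock, LK_RELEASE);
 *	}
 */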
/*
 * Set, change, or release a lock.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	thread_t otd;
	int error;
	int extflags;
	int count;
	int pflags;
	int wflags;
	int timo;
#ifdef DEBUG_LOCKS
	int i;
#endif

	error = 0;
	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {
#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

#ifdef DEBUG_LOCKS
	if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
	}
#endif
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

again:
	count = lkp->lk_count;
	cpu_ccfence();

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * Shared lock critical path case
		 */
		if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
			if (atomic_cmpset_int(&lkp->lk_count,
					      count, count + 1)) {
				COUNT(td, 1);
				break;
			}
			goto again;
		}

		/*
		 * If the caller already holds the lock exclusively then
		 * we silently obtain another count on the exclusive lock.
		 *
		 * WARNING!  The old FreeBSD behavior was to downgrade,
		 *	     but this creates a problem when recursions
		 *	     return to the caller and the caller expects
		 *	     its original exclusive lock to remain exclusively
		 *	     locked.
		 */
		if (lkp->lk_lockholder == td) {
			KKASSERT(count & LKC_EXCL);
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_NOWAIT) {
					error = EBUSY;
					break;
				}
				panic("lockmgr: locking against myself");
			}
			atomic_add_int(&lkp->lk_count, 1);
			COUNT(td, 1);
			break;
		}

		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
		wflags = (td->td_flags & TDF_DEADLKTREAT) ?
			 LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

		/*
		 * Block while the lock is held exclusively or, conditionally,
		 * if other threads are trying to obtain an exclusive lock or
		 * upgrade to one.
		 */
		if (count & wflags) {
			if (extflags & LK_CANCELABLE) {
				if (count & LKC_CANCEL) {
					error = ENOLCK;
					break;
				}
			}
			if (extflags & LK_NOWAIT) {
				error = EBUSY;
				break;
			}
			tsleep_interlock(lkp, pflags);
			if (!atomic_cmpset_int(&lkp->lk_count, count,
					       count | LKC_SHREQ)) {
				goto again;
			}

			mycpu->gd_cnt.v_lock_name[0] = 'S';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				lkp->lk_wmesg,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;

			error = tsleep(lkp, pflags | PINTERLOCKED,
				       lkp->lk_wmesg, timo);
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
			goto again;
		}

		/*
		 * Otherwise we can bump the count
		 */
		if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
			COUNT(td, 1);
			break;
		}
		goto again;
	case LK_EXCLUSIVE:
		/*
		 * Exclusive lock critical path.
		 */
		if (count == 0) {
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      LKC_EXCL | (count + 1))) {
				lkp->lk_lockholder = td;
				COUNT(td, 1);
				break;
			}
			goto again;
		}

		/*
		 * Recursive lock if we already hold it exclusively.
		 */
		if (lkp->lk_lockholder == td) {
			KKASSERT(count & LKC_EXCL);
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_NOWAIT) {
					error = EBUSY;
					break;
				}
				panic("lockmgr: locking against myself");
			}
			atomic_add_int(&lkp->lk_count, 1);
			COUNT(td, 1);
			break;
		}

		/*
		 * We will block, handle LK_NOWAIT
		 */
		if (extflags & LK_NOWAIT) {
			error = EBUSY;
			break;
		}
		if (extflags & LK_CANCELABLE) {
			if (count & LKC_CANCEL) {
				error = ENOLCK;
				break;
			}
		}

		/*
		 * Wait until we can obtain the exclusive lock.  EXREQ is
		 * automatically cleared when all current holders release
		 * so if we abort the operation we can safely leave it set.
		 * There might be other exclusive requesters.
		 */
		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

		tsleep_interlock(lkp, pflags);
		if (!atomic_cmpset_int(&lkp->lk_count, count,
				       count | LKC_EXREQ)) {
			goto again;
		}

		mycpu->gd_cnt.v_lock_name[0] = 'X';
		strncpy(mycpu->gd_cnt.v_lock_name + 1,
			lkp->lk_wmesg,
			sizeof(mycpu->gd_cnt.v_lock_name) - 2);
		++mycpu->gd_cnt.v_lock_colls;

		error = tsleep(lkp, pflags | PINTERLOCKED,
			       lkp->lk_wmesg, timo);
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		goto again;
	case LK_DOWNGRADE:
		/*
		 * Downgrade an exclusive lock into a shared lock.  All
		 * counts on a recursive exclusive lock become shared.
		 *
		 * This function always succeeds.
		 */
		if (lkp->lk_lockholder != td ||
		    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
			panic("lockmgr: not holding exclusive lock");
		}

#ifdef DEBUG_LOCKS
		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] > 0
			) {
				td->td_lockmgr_stack_id[i]--;
				break;
			}
		}
#endif

		/*
		 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
		 */
		otd = lkp->lk_lockholder;
		lkp->lk_lockholder = NULL;
		if (atomic_cmpset_int(&lkp->lk_count, count,
				      count & ~(LKC_EXCL|LKC_SHREQ))) {
			if (count & LKC_SHREQ)
				wakeup(lkp);
			break;
		}
		lkp->lk_lockholder = otd;
		goto again;
	case LK_EXCLUPGRADE:
		/*
		 * Upgrade from a single shared lock to an exclusive lock.
		 *
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.  The shared lock is released on
		 * failure.
		 */
		if (count & LKC_UPREQ) {
			flags = LK_RELEASE;
			error = EBUSY;
			goto again;
		}
		/* fall through into normal upgrade */
	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  This can cause
		 * the lock to be temporarily released and stolen by other
		 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
		 * this case, or use LK_EXCLUPGRADE.
		 *
		 * If the lock is already exclusively owned by us, this
		 * operation is a NOP.
		 *
		 * If we return an error (even NOWAIT), the current lock will
		 * be released.
		 *
		 * Start with the critical path.
		 */
		if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count | LKC_EXCL)) {
				lkp->lk_lockholder = td;
				break;
			}
			goto again;
		}

		/*
		 * If we already hold the lock exclusively this operation
		 * succeeds and is a NOP.
		 */
		if (count & LKC_EXCL) {
			if (lkp->lk_lockholder == td)
				break;
			panic("lockmgr: upgrade unowned lock");
		}
		if ((count & LKC_MASK) == 0)
			panic("lockmgr: upgrade unowned lock");

		/*
		 * We cannot upgrade without blocking at this point.
		 */
		if (extflags & LK_NOWAIT) {
			flags = LK_RELEASE;
			error = EBUSY;
			goto again;
		}
		if (extflags & LK_CANCELABLE) {
			if (count & LKC_CANCEL) {
				error = ENOLCK;
				break;
			}
		}

		/*
		 * Release the shared lock and request the upgrade.
		 */
		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
		tsleep_interlock(lkp, pflags);
		wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

		/*
		 * If someone else owns UPREQ and this transition would
		 * allow it to be granted, we have to grant it.  Otherwise
		 * we release the shared lock.
		 */
		if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
			wflags |= LKC_EXCL | LKC_UPGRANT;
			wflags |= count;
			wflags &= ~LKC_UPREQ;
		} else {
			wflags |= (count - 1);
		}

		if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
			COUNT(td, -1);

			/*
			 * Must wakeup the thread granted the upgrade.
			 */
			if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
				wakeup(lkp);

			mycpu->gd_cnt.v_lock_name[0] = 'U';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				lkp->lk_wmesg,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;

			error = tsleep(lkp, pflags | PINTERLOCKED,
				       lkp->lk_wmesg, timo);
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}

			/*
			 * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
			 * depending on whether we were able to acquire the
			 * LKC_UPREQ bit.
			 */
			if (count & LKC_UPREQ)
				flags = LK_EXCLUSIVE;	/* someone else */
			else
				flags = LK_WAITUPGRADE;	/* we own the bit */
		}
		goto again;
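
		/*
		 * Illustrative upgrade pattern (a sketch, not from the
		 * original file; "foo_lock" is hypothetical).  Because
		 * LK_UPGRADE may temporarily release the lock, callers
		 * either revalidate afterwards or use LK_EXCLUPGRADE,
		 * which fails (releasing the shared lock) rather than
		 * allow an intervening exclusive access:
		 *
		 *	lockmgr(&foo_lock, LK_SHARED);
		 *	if (lockmgr(&foo_lock, LK_EXCLUPGRADE) != 0) {
		 *		(shared lock was released on failure)
		 *		lockmgr(&foo_lock, LK_EXCLUSIVE);
		 *		...revalidate state before modifying...
		 *	}
		 *	...modify...
		 *	lockmgr(&foo_lock, LK_RELEASE);
		 */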
	case LK_WAITUPGRADE:
		/*
		 * We own the LKC_UPREQ bit, wait until we are granted the
		 * exclusive lock (LKC_UPGRANT is set).
		 *
		 * IF THE OPERATION FAILS (tsleep error or tsleep+LK_SLEEPFAIL),
		 * we have to undo the upgrade request and clean up any lock
		 * that might have been granted via a race.
		 */
		if (count & LKC_UPGRANT) {
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPGRANT)) {
				lkp->lk_lockholder = td;
				KKASSERT(count & LKC_EXCL);
				break;
			}
			/* retry */
		} else if ((count & LKC_CANCEL) && (extflags & LK_CANCELABLE)) {
			undo_upreq(lkp);
			error = ENOLCK;
			break;
		} else {
			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
			timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
			tsleep_interlock(lkp, pflags);
			if (atomic_cmpset_int(&lkp->lk_count, count, count)) {

				mycpu->gd_cnt.v_lock_name[0] = 'U';
				strncpy(mycpu->gd_cnt.v_lock_name + 1,
					lkp->lk_wmesg,
					sizeof(mycpu->gd_cnt.v_lock_name) - 2);
				++mycpu->gd_cnt.v_lock_colls;

				error = tsleep(lkp, pflags | PINTERLOCKED,
					       lkp->lk_wmesg, timo);
				if (error) {
					undo_upreq(lkp);
					break;
				}
				if (extflags & LK_SLEEPFAIL) {
					error = ENOLCK;
					undo_upreq(lkp);
					break;
				}
			}
			/* retry */
		}
		goto again;
	case LK_RELEASE:
		/*
		 * Release the currently held lock.  If releasing the current
		 * lock as part of an error return, error will ALREADY be
		 * non-zero.
		 *
		 * When releasing the last lock we automatically transition
		 * LKC_UPREQ to LKC_EXCL|1.
		 *
		 * WARNING! We cannot detect when there are multiple exclusive
		 *	    requests pending.  We clear EXREQ unconditionally
		 *	    on the 1->0 transition so it is possible for
		 *	    shared requests to race the next exclusive
		 *	    request.
		 *
		 * Always succeeds.
		 */
		if ((count & LKC_MASK) == 0)
			panic("lockmgr: LK_RELEASE: no lock held");

		if (count & LKC_EXCL) {
			if (lkp->lk_lockholder != LK_KERNTHREAD &&
			    lkp->lk_lockholder != td) {
				panic("lockmgr: pid %d, not exclusive "
				      "lock holder thr %p/%p unlocking",
				      (td->td_proc ? td->td_proc->p_pid : -1),
				      td, lkp->lk_lockholder);
			}
			if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
				/*
				 * Last exclusive count is being released
				 */
				otd = lkp->lk_lockholder;
				lkp->lk_lockholder = NULL;
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       (count - 1) &
						       ~(LKC_EXCL | LKC_EXREQ |
							 LKC_SHREQ | LKC_CANCEL))) {
					lkp->lk_lockholder = otd;
					goto again;
				}
				if (count & (LKC_EXREQ|LKC_SHREQ))
					wakeup(lkp);
				/* success */
			} else if ((count & (LKC_UPREQ|LKC_MASK)) ==
				   (LKC_UPREQ | 1)) {
				/*
				 * Last exclusive count is being released but
				 * an upgrade request is present, automatically
				 * grant an exclusive state to the owner of
				 * the upgrade request.
				 */
				otd = lkp->lk_lockholder;
				lkp->lk_lockholder = NULL;
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       (count & ~LKC_UPREQ) |
						       LKC_UPGRANT)) {
					lkp->lk_lockholder = otd;
					goto again;
				}
				wakeup(lkp);
				/* success */
			} else {
				otd = lkp->lk_lockholder;
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       count - 1)) {
					goto again;
				}
				/* success */
			}
			/* success */
			if (otd != LK_KERNTHREAD)
				COUNT(td, -1);
		} else {
			if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
				/*
				 * Last shared count is being released.
				 */
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       (count - 1) &
						       ~(LKC_EXREQ | LKC_SHREQ |
							 LKC_CANCEL))) {
					goto again;
				}
				if (count & (LKC_EXREQ|LKC_SHREQ))
					wakeup(lkp);
				/* success */
			} else if ((count & (LKC_UPREQ|LKC_MASK)) ==
				   (LKC_UPREQ | 1)) {
				/*
				 * Last shared count is being released but
				 * an upgrade request is present, automatically
				 * grant an exclusive state to the owner of
				 * the upgrade request.  Masked count
				 * remains 1.
				 */
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       (count & ~(LKC_UPREQ |
								  LKC_CANCEL)) |
						       LKC_EXCL | LKC_UPGRANT)) {
					goto again;
				}
				wakeup(lkp);
				/* success */
			} else {
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       count - 1)) {
					goto again;
				}
				/* success */
			}
			COUNT(td, -1);
		}
		break;
	case LK_CANCEL_BEG:
		/*
		 * Start canceling blocked requestors or later requestors.
		 * Requestors must use CANCELABLE.  Don't waste time issuing
		 * a wakeup if nobody is pending.
		 */
		KKASSERT((count & LKC_CANCEL) == 0);	/* disallowed case */
		KKASSERT((count & LKC_MASK) != 0);	/* issue w/lock held */
		if (!atomic_cmpset_int(&lkp->lk_count,
				       count, count | LKC_CANCEL)) {
			goto again;
		}
		if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
			wakeup(lkp);
		}
		break;

	case LK_CANCEL_END:
		atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
		break;
	default:
		panic("lockmgr: unknown locktype request %d",
		      flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	return (error);
}
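
/*
 * Illustrative cancelable-lock pattern (a sketch, not from the original
 * file; "foo_lock" is hypothetical).  Requesting threads opt in with
 * LK_CANCELABLE and must be prepared for ENOLCK:
 *
 *	error = lockmgr(&foo_lock, LK_EXCLUSIVE | LK_CANCELABLE);
 *	if (error == 0) {
 *		...
 *		lockmgr(&foo_lock, LK_RELEASE);
 *	}
 *
 * A thread tearing the structure down cancels blocked and later
 * requestors while holding the lock:
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);
 *	lockmgr(&foo_lock, LK_CANCEL_BEG);
 *	...teardown...
 *	lockmgr(&foo_lock, LK_CANCEL_END);
 *	lockmgr(&foo_lock, LK_RELEASE);
 */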
/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
	int count;

	for (;;) {
		count = lkp->lk_count;
		cpu_ccfence();
		if (count & LKC_UPGRANT) {
			/*
			 * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
			 * another thread might own UPREQ.  Clear UPGRANT
			 * and release the granted lock.
			 */
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPGRANT)) {
				lockmgr(lkp, LK_RELEASE);
				break;
			}
		} else if (count & LKC_EXCL) {
			/*
			 * Clear the UPREQ we still own.  Nobody to wakeup
			 * here because there is an existing exclusive
			 * holder.
			 */
			KKASSERT(count & LKC_UPREQ);
			KKASSERT((count & LKC_MASK) > 0);
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPREQ)) {
				break;
			}
		} else if (count & LKC_EXREQ) {
			/*
			 * Clear the UPREQ we still own.  We cannot wakeup any
			 * shared waiters because there is an exclusive
			 * request pending.
			 */
			KKASSERT(count & LKC_UPREQ);
			KKASSERT((count & LKC_MASK) > 0);
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPREQ)) {
				break;
			}
		} else {
			/*
			 * Clear the UPREQ we still own.  Wakeup any shared
			 * waiters.
			 */
			KKASSERT(count & LKC_UPREQ);
			KKASSERT((count & LKC_MASK) > 0);
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count &
					      ~(LKC_UPREQ | LKC_SHREQ))) {
				if (count & LKC_SHREQ)
					wakeup(lkp);
				break;
			}
		}
		/* retry */
	}
}
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
			("lockmgr_kernproc: lock not owned by curthread %p: %p",
			 td, lp->lk_lockholder));
		lp->lk_lockholder = LK_KERNTHREAD;
		COUNT(td, -1);
	}
}
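
/*
 * Illustrative note (not from the original file): lockmgr_kernproc() is
 * typically used when an exclusively held lock must be released by a
 * different context than the one that acquired it, e.g. handing a locked
 * buffer to the kernel so I/O completion can unlock it:
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);	("foo_lock" hypothetical)
 *	lockmgr_kernproc(&foo_lock);		(owner becomes LK_KERNTHREAD)
 *	...any thread may now perform the LK_RELEASE...
 */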
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_count = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}
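
/*
 * Example (illustrative sketch, not from the original file):
 *
 *	struct lock map_lock;
 *
 *	lockinit(&map_lock, "maplck", 0, LK_CANRECURSE);
 *
 * "maplck" is the tsleep wait message, a timo of 0 means no timeout
 * unless LK_TIMELOCK is used, and LK_CANRECURSE allows recursive
 * exclusive acquisition by the same thread.
 */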
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
}
/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
	KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;
	int count;

	count = lkp->lk_count;
	cpu_ccfence();

	if (count & LKC_EXCL) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (count & LKC_MASK) {
		lock_type = LK_SHARED;
	}
	return (lock_type);
}
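
/*
 * Example (illustrative sketch, not from the original file):
 *
 *	if (lockstatus(&foo_lock, curthread) == LK_EXCLUSIVE) {
 *		...curthread holds foo_lock exclusively...
 *	}
 *
 * Passing td == NULL reports LK_EXCLUSIVE for any exclusive holder;
 * LK_EXCLOTHER is returned when another thread holds it exclusively.
 */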
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;
	int count;

	count = lkp->lk_count;
	cpu_ccfence();

	if (count & LKC_EXCL)
		return(lkp->lk_lockholder == td);
	else
		return((count & LKC_MASK) != 0);
}
/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
	return(lkp->lk_count & LKC_MASK);
}
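
/*
 * Example assertion (illustrative sketch, not from the original file):
 *
 *	KKASSERT(lockcountnb(&foo_lock) == 0);
 *	lockuninit(&foo_lock);
 */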
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;
	int count;

	count = lkp->lk_count;
	cpu_ccfence();

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (count & LKC_EXCL) {
		kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
			lkp->lk_wmesg, count, td,
			p ? p->p_pid : -99);
	} else if (count & LKC_MASK) {
		kprintf(" lock type %s: SHARED (count %08x)",
			lkp->lk_wmesg, count);
	} else {
		kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
	}
	if (count & (LKC_EXREQ|LKC_SHREQ))
		kprintf(" with waiters\n");
	else
		kprintf("\n");
}
void
lock_sysinit(struct lock_args *arg)
{
	lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
#ifdef DEBUG_CANCEL_LOCKS

static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
	int error;

	lockmgr(&cancel_lk, LK_EXCLUSIVE);
	error = tsleep(&error, PCATCH, "canmas", hz * 5);
	lockmgr(&cancel_lk, LK_CANCEL_BEG);
	error = tsleep(&error, PCATCH, "canmas", hz * 5);
	lockmgr(&cancel_lk, LK_RELEASE);
	SYSCTL_OUT(req, &error, sizeof(error));

	return 0;
}

static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
	if (error == 0)
		lockmgr(&cancel_lk, LK_RELEASE);
	SYSCTL_OUT(req, &error, sizeof(error));
	kprintf("test %d\n", error);

	return 0;
}

#endif