/*
 * The Regents of the University of California.  All rights reserved.
 *
 * John S. Dyson.  All rights reserved.
 *
 * Copyright (C) 2013-2014
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
static void undo_upreq(struct lock *lkp);

#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_test, "I", "test cancelable locks");

#endif
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define COUNT(td, x)		(td)->td_locks += (x)

#define LOCK_WAIT_TIME		100
#define LOCK_SAMPLE_WAIT	7
/*
 * Set, change, or release a lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        thread_t otd;
        int error;
        int extflags;
        int count;
        int pflags;
        int wflags;
        int timo;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif
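        /*
         * All of the lock state below lives in lkp->lk_count and is only
         * modified with atomic_cmpset_int().  Whenever a compare-and-set
         * fails because another cpu changed the count, the code simply
         * re-reads lk_count and retries the operation.
         */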
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

again:
        count = lkp->lk_count;
        cpu_ccfence();

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
                                              count, count + 1)) {
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *	     but this creates a problem when recursions
                 *	     return to the caller and the caller expects
                 *	     its original exclusive lock to remain exclusively
                 *	     locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                         LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_CANCELABLE) {
                                if (count & LKC_CANCEL) {
                                        error = ENOLCK;
                                        break;
                                }
                        }
                        if (extflags & LK_NOWAIT) {
                                error = EBUSY;
                                break;
                        }
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                               count | LKC_SHREQ)) {
                                goto again;
                        }

                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }
                        goto again;
                }

                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
                        COUNT(td, 1);
                        break;
                }
                goto again;
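                /*
                 * Note on the blocking pattern used above and in the cases
                 * below: the thread first registers for the wakeup with
                 * tsleep_interlock(), then atomically sets the request bit
                 * (LKC_SHREQ here).  Only if that compare-and-set succeeds
                 * does it commit to tsleep() with PINTERLOCKED, so a wakeup
                 * issued between the interlock and the sleep is not lost.
                 */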
        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (count == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              LKC_EXCL | (count + 1))) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                error = ENOLCK;
                                break;
                        }
                }

                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                        goto again;
                }

                mycpu->gd_cnt.v_lock_name[0] = 'X';
                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                        lkp->lk_wmesg,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                ++mycpu->gd_cnt.v_lock_colls;

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (error)
                        break;
                if (extflags & LK_SLEEPFAIL) {
                        error = ENOLCK;
                        break;
                }
                goto again;
        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                        break;
                }
                lkp->lk_lockholder = otd;
                goto again;
        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                /* fall through into normal upgrade */
        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If the lock is already exclusively owned by us, this
                 * operation is a NOP.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_EXCL)) {
                                lkp->lk_lockholder = td;
                                break;
                        }
                        goto again;
                }

                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");

                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                flags = LK_RELEASE;
                                error = ENOLCK;
                                goto again;
                        }
                }

                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

                /*
                 * If someone else owns UPREQ and this transition would
                 * allow it to be granted, we have to grant it.  Otherwise
                 * we release the shared lock.
                 */
                if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
                        wflags |= LKC_EXCL | LKC_UPGRANT;
                        wflags |= count;
                        wflags &= ~LKC_UPREQ;
                } else {
                        wflags |= (count - 1);
                }

                if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
                        COUNT(td, -1);

                        /*
                         * Must wakeup the thread granted the upgrade.
                         */
                        if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
                                wakeup(lkp);

                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;	/* someone else */
                        else
                                flags = LK_WAITUPGRADE;	/* we own the bit */
                }
                goto again;
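                /*
                 * The LK_UPGRADE path above and the LK_WAITUPGRADE case
                 * below form a handshake with LK_RELEASE: the upgrader sets
                 * LKC_UPREQ and sleeps, the releasing thread converts the
                 * request into LKC_EXCL|LKC_UPGRANT and issues the wakeup,
                 * and the upgrader then clears LKC_UPGRANT and takes over
                 * lk_lockholder.
                 */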
        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error, or tsleep+LK_SLEEPFAIL),
                 * we have to undo the upgrade request and clean up any lock
                 * that might have been granted via a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);
                                COUNT(td, 1);
                                break;
                        }
                } else if ((count & LKC_CANCEL) && (extflags & LK_CANCELABLE)) {
                        undo_upreq(lkp);
                        error = ENOLCK;
                        break;
                } else {
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                        tsleep_interlock(lkp, pflags);
                        if (atomic_cmpset_int(&lkp->lk_count, count, count)) {
                                mycpu->gd_cnt.v_lock_name[0] = 'U';
                                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                        lkp->lk_wmesg,
                                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                                ++mycpu->gd_cnt.v_lock_colls;

                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error) {
                                        undo_upreq(lkp);
                                        break;
                                }
                                if (extflags & LK_SLEEPFAIL) {
                                        error = ENOLCK;
                                        undo_upreq(lkp);
                                        break;
                                }
                        }
                }
                goto again;
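                /*
                 * The failure paths above call undo_upreq() because the
                 * LKC_UPREQ (or raced LKC_UPGRANT) bit we own must not be
                 * left behind once we give up on the upgrade; see
                 * undo_upreq() below for the individual cases it unwinds.
                 */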
        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *	    requests pending.  We clear EXREQ unconditionally
                 *	    on the 1->0 transition so it is possible for
                 *	    shared requests to race the next exclusive
                 *	    request.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                              ~(LKC_EXCL | LKC_EXREQ |
                                                LKC_SHREQ | LKC_CANCEL))) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~LKC_UPREQ) |
                                              LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                wakeup(lkp);
                        } else {
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                        }
                        if (otd != LK_KERNTHREAD)
                                COUNT(td, -1);
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                              ~(LKC_EXREQ | LKC_SHREQ |
                                                LKC_CANCEL))) {
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.  Masked count
                                 * remains 1.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~(LKC_UPREQ |
                                                         LKC_CANCEL)) |
                                              LKC_EXCL | LKC_UPGRANT)) {
                                        goto again;
                                }
                                wakeup(lkp);
                        } else {
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                        }
                        COUNT(td, -1);
                }
                break;
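                /*
                 * Note that the 1->0 release transitions above also clear
                 * LKC_CANCEL, so a canceled lock returns to a clean state
                 * once the final count is released (LK_CANCEL_END below
                 * does the same thing explicitly).
                 */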
        case LK_CANCEL_BEG:
                /*
                 * Start canceling blocked requestors or later requestors.
                 * Requestors must use CANCELABLE.  Don't waste time issuing
                 * a wakeup if nobody is pending.
                 */
                KKASSERT((count & LKC_CANCEL) == 0);	/* disallowed case */
                KKASSERT((count & LKC_MASK) != 0);	/* issue w/lock held */
                if (!atomic_cmpset_int(&lkp->lk_count,
                                       count, count | LKC_CANCEL)) {
                        goto again;
                }
                if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;

        case LK_CANCEL_END:
                atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
                break;

        default:
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
        }

        return (error);
}
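/*
 * Illustrative use of the cancellation interface above (hypothetical
 * caller; the structure and field names are made up).  A holder that is
 * tearing an object down can kick CANCELABLE waiters off the lock first:
 *
 *	lockmgr(&obj->lk, LK_EXCLUSIVE);
 *	lockmgr(&obj->lk, LK_CANCEL_BEG);	// waiters using LK_CANCELABLE
 *						// receive ENOLCK
 *	... tear down or revoke the object ...
 *	lockmgr(&obj->lk, LK_CANCEL_END);	// clear LKC_CANCEL
 *	lockmgr(&obj->lk, LK_RELEASE);
 *
 * See also sysctl_cancel_lock()/sysctl_cancel_test() at the end of this
 * file, which exercise the same sequence under DEBUG_CANCEL_LOCKS.
 */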
/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
        int count;

        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                         * another thread might own UPREQ.  Clear UPGRANT
                         * and release the granted lock.
                         */
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lockmgr(lkp, LK_RELEASE);
                                break;
                        }
                } else if (count & LKC_EXCL) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  We cannot wakeup any
                         * shared waiters because there is an exclusive
                         * request pending.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count &
                                              ~(LKC_UPREQ | LKC_SHREQ))) {
                                if (count & LKC_SHREQ)
                                        wakeup(lkp);
                                break;
                        }
                }
        }
}
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
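/*
 * Typical usage (illustrative only; the structure and wmesg below are
 * hypothetical, not part of this file):
 *
 *	struct lock my_lk;
 *
 *	lockinit(&my_lk, "mylk", 0, 0);
 *	lockmgr(&my_lk, LK_EXCLUSIVE);
 *	... modify the protected data ...
 *	lockmgr(&my_lk, LK_RELEASE);
 *
 * Note that LK_UPGRADE may temporarily release the shared lock (see the
 * LK_UPGRADE case in lockmgr()), so state examined under the shared lock
 * should be revalidated after a successful upgrade.
 */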
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}
/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}
/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                        lkp->lk_wmesg, count, td,
                        p ? p->p_pid : -99);
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                        lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
#ifdef DEBUG_CANCEL_LOCKS

static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
        int error = 0;

        if (req->newptr) {
                lockmgr(&cancel_lk, LK_EXCLUSIVE);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_CANCEL_BEG);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_RELEASE);
        }
        SYSCTL_OUT(req, &error, sizeof(error));

        return 0;
}

static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
                if (error == 0)
                        lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_OUT(req, &error, sizeof(error));
                kprintf("test %d\n", error);
        }

        return 0;
}

#endif
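/*
 * The two handlers above are only built under DEBUG_CANCEL_LOCKS.  The
 * kern.cancel_lock sysctl holds cancel_lk exclusively, sleeps, and then
 * issues LK_CANCEL_BEG, while kern.cancel_test attempts an
 * LK_EXCLUSIVE|LK_CANCELABLE acquisition and reports the resulting error
 * (ENOLCK once canceled) so the behavior can be observed from userland.
 */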