/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 * Copyright (C) 2013-2014
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/indefinite2.h>
static void undo_upreq(struct lock *lkp);
#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_test, "I", "test cancelable locks");

#endif
int lock_test_mode;
SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
           &lock_test_mode, 0, "");
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define COUNT(td, x)    (td)->td_locks += (x)
/*
 * Set, change, or release a lock.
 */
#ifndef DEBUG_LOCKS
int
lockmgr(struct lock *lkp, u_int flags)
#else
int
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        thread_t otd;
        int error;
        int extflags;
        int count;
        int pflags;
        int wflags;
        int timo;
        int info_init;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;
        info_init = 0;
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }
#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

again:
        count = lkp->lk_count;
        cpu_ccfence();

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
                                              count, count + 1)) {
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }
                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                         LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_CANCELABLE) {
                                if (count & LKC_CANCEL) {
                                        error = ENOLCK;
                                        break;
                                }
                        }
                        if (extflags & LK_NOWAIT) {
                                error = EBUSY;
                                break;
                        }
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                               count | LKC_SHREQ)) {
                                goto again;
                        }

                        if (info_init == 0 &&
                            (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
                                indefinite_init(&td->td_indefinite,
                                                lkp->lk_wmesg, 1, 'l');
                                info_init = 1;
                        }

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }
                        indefinite_check(&td->td_indefinite);
                        goto again;
                }
                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
                        COUNT(td, 1);
                        break;
                }
                goto again;
        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (count == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              LKC_EXCL | (count + 1))) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }
                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }
                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                error = ENOLCK;
                                break;
                        }
                }
                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                        goto again;
                }

                if (info_init == 0 &&
                    (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
                        indefinite_init(&td->td_indefinite, lkp->lk_wmesg,
                                        1, 'l');
                        info_init = 1;
                }

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (error)
                        break;
                if (extflags & LK_SLEEPFAIL) {
                        error = ENOLCK;
                        break;
                }
                indefinite_check(&td->td_indefinite);
                goto again;
        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }
#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                        break;
                }
                lkp->lk_lockholder = otd;
                goto again;
        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                /* fall through into normal upgrade */
        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If the lock is already exclusively owned by us, this
                 * operation is a NOP.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_EXCL)) {
                                lkp->lk_lockholder = td;
                                break;
                        }
                        goto again;
                }
                /*
                 * We own a lock coming into this, so there cannot be an
                 * UPGRANT already flagged.
                 */
                KKASSERT((count & LKC_UPGRANT) == 0);

                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");
                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                flags = LK_RELEASE;
                                error = ENOLCK;
                                goto again;
                        }
                }
                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;
                /*
                 * If someone else owns UPREQ and this transition would
                 * allow it to be granted, we have to grant it.  Our
                 * lock count is transferred (we effectively release).
                 * We will then request a normal exclusive lock.
                 *
                 * Otherwise we release the shared lock and either do
                 * an UPREQ or an EXREQ.  The count is always > 1 in
                 * this case since we handle all other count == 1
                 * situations here and above.
                 */
                if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
                        wflags |= LKC_EXCL | LKC_UPGRANT;
                        wflags |= count;
                        wflags &= ~LKC_UPREQ;   /* was set from count */
                } else {
                        wflags |= (count - 1);
                }
                if (info_init == 0 &&
                    (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
                        indefinite_init(&td->td_indefinite, lkp->lk_wmesg,
                                        1, 'l');
                        info_init = 1;
                }

                if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
                        /*
                         * Must wakeup the thread granted the upgrade.
                         */
                        if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
                                wakeup(lkp);

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error) {
                                if ((count & LKC_UPREQ) == 0)
                                        undo_upreq(lkp);
                                break;
                        }
                        if (extflags & LK_SLEEPFAIL) {
                                if ((count & LKC_UPREQ) == 0)
                                        undo_upreq(lkp);
                                error = ENOLCK;
                                break;
                        }

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;   /* someone else */
                        else
                                flags = LK_WAITUPGRADE; /* we own the bit */
                        indefinite_check(&td->td_indefinite);
                }
                goto again;
        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error, tsleep+LK_SLEEPFAIL),
                 * we have to undo the upgrade request and clean up any lock
                 * that might have been granted via a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);
                                break;
                        }
                        /* retry */
                } else if ((count & LKC_CANCEL) && (extflags & LK_CANCELABLE)) {
                        undo_upreq(lkp);
                        error = ENOLCK;
                        break;
                } else {
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                        tsleep_interlock(lkp, pflags);
                        if (atomic_fetchadd_int(&lkp->lk_count, 0) == count) {
                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error) {
                                        undo_upreq(lkp);
                                        break;
                                }
                                if (extflags & LK_SLEEPFAIL) {
                                        error = ENOLCK;
                                        undo_upreq(lkp);
                                        break;
                                }
                        }
                        indefinite_check(&td->td_indefinite);
                        /* retry */
                }
                goto again;
        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *          requests pending.  We clear EXREQ unconditionally
                 *          on the 1->0 transition so it is possible for
                 *          shared requests to race the next exclusive
                 *          request.
                 *
                 * WARNING! lksleep() assumes that LK_RELEASE does not
                 *          block.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                              ~(LKC_EXCL | LKC_EXREQ |
                                                LKC_SHREQ | LKC_CANCEL))) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~LKC_UPREQ) |
                                              LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                wakeup(lkp);
                                /* success */
                        } else {
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                                /* success */
                        }
                        /* success */
                        if (otd != LK_KERNTHREAD)
                                COUNT(td, -1);
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released,
                                 * no upgrade request present.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                              ~(LKC_EXREQ | LKC_SHREQ |
                                                LKC_CANCEL))) {
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.  Masked count
                                 * remains 1.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~(LKC_UPREQ |
                                                         LKC_CANCEL)) |
                                              LKC_EXCL | LKC_UPGRANT)) {
                                        goto again;
                                }
                                wakeup(lkp);
                                /* success */
                        } else {
                                /*
                                 * Shared count is greater than 1, just
                                 * decrement it by one.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                                /* success */
                        }
                        /* success */
                        COUNT(td, -1);
                }
                break;
        case LK_CANCEL_BEG:
                /*
                 * Start canceling blocked requestors or later requestors.
                 * Requestors must use CANCELABLE.  Don't waste time issuing
                 * a wakeup if nobody is pending.
                 */
                KKASSERT((count & LKC_CANCEL) == 0);    /* disallowed case */
                KKASSERT((count & LKC_MASK) != 0);      /* issue w/lock held */
                if (!atomic_cmpset_int(&lkp->lk_count,
                                       count, count | LKC_CANCEL)) {
                        goto again;
                }
                if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;
        case LK_CANCEL_END:
                atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
                break;

        default:
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        if (info_init)
                indefinite_done(&td->td_indefinite);

        return (error);
}
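
#ifdef LOCKMGR_EXAMPLES         /* hypothetical guard; sketches only, never built */
/*
 * Minimal usage sketch of lockmgr() acquire/release patterns described
 * above.  The function name is hypothetical and not part of this file.
 */
static void
example_lockmgr_basic(struct lock *lk)
{
        int error;

        /* blocking shared acquisition, then release */
        lockmgr(lk, LK_SHARED);
        lockmgr(lk, LK_RELEASE);

        /* non-blocking exclusive attempt; EBUSY indicates contention */
        error = lockmgr(lk, LK_EXCLUSIVE | LK_NOWAIT);
        if (error == 0)
                lockmgr(lk, LK_RELEASE);
}
#endif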
/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
        int count;

        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                         * another thread might own UPREQ.  Clear UPGRANT
                         * and release the granted lock.
                         */
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = curthread;
                                lockmgr(lkp, LK_RELEASE);
                                break;
                        }
                } else if (count & LKC_EXCL) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  We cannot wakeup any
                         * shared waiters because there is an exclusive
                         * request pending.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count &
                                              ~(LKC_UPREQ | LKC_SHREQ))) {
                                if (count & LKC_SHREQ)
                                        wakeup(lkp);
                                break;
                        }
                }
                /* retry */
        }
}
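
#ifdef LOCKMGR_EXAMPLES         /* hypothetical guard; sketches only, never built */
/*
 * Minimal sketch of the upgrade path described above.  Per the LK_UPGRADE
 * comments, a failed upgrade (including LK_EXCLUPGRADE losing to a
 * competing LKC_UPREQ) releases the lock, so the caller must not assume
 * continuity of protection and has to reacquire.  The function name is
 * hypothetical and not part of this file.
 */
static int
example_lockmgr_upgrade(struct lock *lk)
{
        int error;

        lockmgr(lk, LK_SHARED);
        error = lockmgr(lk, LK_EXCLUPGRADE | LK_NOWAIT);
        if (error) {
                /* shared lock was released on failure; start over */
                lockmgr(lk, LK_EXCLUSIVE);
        }
        lockmgr(lk, LK_RELEASE);
        return error;
}
#endif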
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                        ("lockmgr_kernproc: lock not owned by curthread %p: %p",
                         td, lp->lk_lockholder));
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
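
#ifdef LOCKMGR_EXAMPLES         /* hypothetical guard; sketches only, never built */
/*
 * Minimal sketch of handing a held lock to the kernel so a thread other
 * than the acquirer may legally release it later.  Hypothetical code,
 * not part of this file.
 */
static void
example_lockmgr_kernproc(struct lock *lk)
{
        lockmgr(lk, LK_EXCLUSIVE);
        lockmgr_kernproc(lk);   /* owner becomes LK_KERNTHREAD */
        /* ... some other thread eventually does lockmgr(lk, LK_RELEASE) */
}
#endif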
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
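
#ifdef LOCKMGR_EXAMPLES         /* hypothetical guard; sketches only, never built */
/*
 * Minimal sketch of lock setup and teardown.  The wmesg string is the
 * identifier reported while sleeping; LK_CANRECURSE permits recursive
 * exclusive locks.  Hypothetical code, not part of this file.
 */
static void
example_lockinit(void)
{
        struct lock lk;

        lockinit(&lk, "exmplk", 0, LK_CANRECURSE);
        lockmgr(&lk, LK_EXCLUSIVE);
        lockmgr(&lk, LK_EXCLUSIVE);     /* recursion ok with LK_CANRECURSE */
        lockmgr(&lk, LK_RELEASE);
        lockmgr(&lk, LK_RELEASE);
        lockuninit(&lk);
}
#endif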
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}
/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}
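
#ifdef LOCKMGR_EXAMPLES         /* hypothetical guard; sketches only, never built */
/*
 * Minimal sketch of interrogating a lock.  Passing curthread lets
 * lockstatus() distinguish our own exclusive hold (LK_EXCLUSIVE) from
 * someone else's (LK_EXCLOTHER).  Hypothetical code, not part of this
 * file.
 */
static int
example_lockstatus(struct lock *lk)
{
        switch (lockstatus(lk, curthread)) {
        case LK_EXCLUSIVE:      /* we hold it exclusively */
        case LK_SHARED:         /* held shared (holder unknown) */
                return 1;
        default:                /* LK_EXCLOTHER or not held */
                return 0;
        }
}
#endif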
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}
/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}
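
#ifdef LOCKMGR_EXAMPLES         /* hypothetical guard; sketches only, never built */
/*
 * Minimal sketch of an assertion built on the non-blocking count, as
 * suggested by the comment above.  Hypothetical code, not part of this
 * file.
 */
static void
example_lockcount_assert(struct lock *lk)
{
        /* e.g. verify a structure is unlocked before tearing it down */
        KKASSERT(lockcountnb(lk) == 0);
}
#endif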
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                        lkp->lk_wmesg, count, td,
                        p ? p->p_pid : -99);
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                        lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}
/*
 * Initialize a lock at run time via a SYSINIT (see LOCK_SYSINIT()).
 */
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
#ifdef DEBUG_CANCEL_LOCKS

static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
        int error;

        error = 0;
        if (req->newptr) {
                lockmgr(&cancel_lk, LK_EXCLUSIVE);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_CANCEL_BEG);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_RELEASE);
        }
        SYSCTL_OUT(req, &error, sizeof(error));

        return 0;
}
static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
        int error;

        error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
        if (error == 0)
                lockmgr(&cancel_lk, LK_RELEASE);
        SYSCTL_OUT(req, &error, sizeof(error));
        kprintf("test %d\n", error);

        return 0;
}

#endif
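
#ifdef LOCKMGR_EXAMPLES         /* hypothetical guard; sketches only, never built */
/*
 * Minimal sketch of the cancelation protocol exercised by the sysctls
 * above: the lock holder brackets a shutdown with LK_CANCEL_BEG/END
 * while contending threads opt in with LK_CANCELABLE and must handle
 * ENOLCK.  Hypothetical code, not part of this file.
 */
static int
example_cancelable_acquire(struct lock *lk)
{
        int error;

        error = lockmgr(lk, LK_EXCLUSIVE | LK_CANCELABLE);
        if (error)              /* ENOLCK: canceled via LK_CANCEL_BEG */
                return error;
        lockmgr(lk, LK_RELEASE);
        return 0;
}
#endif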