/*
 * The Regents of the University of California.  All rights reserved.
 * John S. Dyson.  All rights reserved.
 * Copyright (C) 2013-2017
 * Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>.  Extensively rewritten.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/indefinite2.h>
static void undo_shreq(struct lock *lkp);
static int undo_upreq(struct lock *lkp);
static int undo_exreq(struct lock *lkp);
#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;

LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_test, "I", "test cancelable locks");

#endif

static int lock_test_mode;
SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
           &lock_test_mode, 0, "");
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x) do { } while (0)
#endif
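
/*
 * Illustrative usage sketch (not part of this file's implementation):
 * a minimal shared/exclusive round trip with the API implemented below.
 * The lock variable and wmesg names are hypothetical.
 *
 *	struct lock demo_lk;
 *
 *	lockinit(&demo_lk, "demolk", 0, 0);
 *
 *	lockmgr(&demo_lk, LK_SHARED);
 *	... read-only access to the protected data ...
 *	lockmgr(&demo_lk, LK_RELEASE);
 *
 *	lockmgr(&demo_lk, LK_EXCLUSIVE);
 *	... modify the protected data ...
 *	lockmgr(&demo_lk, LK_RELEASE);
 *
 *	lockuninit(&demo_lk);
 */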
/*
 * Helper, assert basic conditions
 */
static void
_lockmgr_assert(struct lock *lkp, u_int flags)
{
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE) {
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
        }
}
/*
 * Acquire a shared lock
 */
int
lockmgr_shared(struct lock *lkp, u_int flags)
{
        thread_t td = curthread;
        uint64_t count;
        uint32_t extflags;
        int error = 0;
        int pflags;
        int timo;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

        count = lkp->lk_count;

        /*
         * If the caller already holds the lock exclusively then
         * we silently obtain another count on the exclusive lock.
         * Avoid accessing lk_lockholder until testing exclusivity.
         *
         * WARNING! The old FreeBSD behavior was to downgrade,
         *          but this creates a problem when recursions
         *          return to the caller and the caller expects
         *          its original exclusive lock to remain exclusively
         *          locked.
         */
        if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
                KKASSERT(lkp->lk_count & LKC_XMASK);
                if ((extflags & LK_CANRECURSE) == 0) {
                        if (extflags & LK_NOWAIT)
                                return EBUSY;
                        panic("lockmgr: locking against myself");
                }
                atomic_add_64(&lkp->lk_count, 1);
                COUNT(td, 1);
                return 0;
        }

        /*
         * Unless TDF_DEADLKTREAT is set, we cannot add LKC_SCOUNT while
         * SHARED is set and either EXREQ or UPREQ are set.
         *
         * NOTE: In the race-to-0 case (see undo_shreq()), we could
         *       theoretically work the SMASK == 0 case here.
         */
        if ((td->td_flags & TDF_DEADLKTREAT) == 0) {
                while ((count & LKC_SHARED) &&
                       (count & (LKC_EXREQ | LKC_UPREQ))) {
                        /*
                         * Immediate failure conditions
                         */
                        if (extflags & LK_CANCELABLE) {
                                if (count & LKC_CANCEL)
                                        return ENOLCK;
                        }
                        if (extflags & LK_NOWAIT)
                                return EBUSY;

                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                        tsleep_interlock(lkp, pflags);
                        count = atomic_fetchadd_long(&lkp->lk_count, 0);

                        if ((count & LKC_SHARED) &&
                            (count & (LKC_EXREQ | LKC_UPREQ))) {
                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                        }
                        count = lkp->lk_count;
                }
        }

        /*
         * Bump the SCOUNT field.  The shared lock is granted only once
         * the SHARED flag gets set.  If it is already set, we are done.
         *
         * (Racing an EXREQ or UPREQ operation is ok here, we already did
         * our checks above).
         */
        count = atomic_fetchadd_64(&lkp->lk_count, LKC_SCOUNT) + LKC_SCOUNT;

        for (;;) {
                /*
                 * We may be able to grant ourselves the bit trivially.
                 * We're done once the SHARED bit is granted.
                 */
                if ((count & (LKC_XMASK | LKC_EXREQ |
                              LKC_UPREQ | LKC_SHARED)) == 0) {
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, count | LKC_SHARED)) {
                                /* count |= LKC_SHARED; NOT USED */
                                break;
                        }
                        continue;
                }
                if ((td->td_flags & TDF_DEADLKTREAT) &&
                    (count & (LKC_XMASK | LKC_SHARED)) == 0) {
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, count | LKC_SHARED)) {
                                /* count |= LKC_SHARED; NOT USED */
                                break;
                        }
                        continue;
                }
                if (count & LKC_SHARED)
                        break;

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                undo_shreq(lkp);
                                error = ENOLCK;
                                break;
                        }
                }
                if (extflags & LK_NOWAIT) {
                        undo_shreq(lkp);
                        error = EBUSY;
                        break;
                }

                /*
                 * Interlocked after the first loop.
                 */
                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (extflags & LK_SLEEPFAIL) {
                        undo_shreq(lkp);
                        if (error == 0)
                                error = ENOLCK;
                        break;
                }
                if (error) {
                        undo_shreq(lkp);
                        break;
                }

                /*
                 * Reload, shortcut grant case, then loop interlock
                 * and loop.
                 */
                count = lkp->lk_count;
                if (count & LKC_SHARED)
                        break;
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }
        if (error == 0)
                COUNT(td, 1);

        return error;
}
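
/*
 * Illustrative sketch: a non-blocking shared acquisition attempt using
 * LK_NOWAIT, as handled by the failure paths above.  Names are
 * hypothetical and the non-zero return is simply treated as "not
 * acquired".
 *
 *	if (lockmgr(&demo_lk, LK_SHARED | LK_NOWAIT) == 0) {
 *		... lock obtained without sleeping ...
 *		lockmgr(&demo_lk, LK_RELEASE);
 *	} else {
 *		... would have blocked (or was canceled); take a
 *		    fallback path ...
 *	}
 */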
/*
 * Acquire an exclusive lock
 */
int
lockmgr_exclusive(struct lock *lkp, u_int flags)
{
        thread_t td = curthread;
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        int error = 0;
        int pflags;
        int timo;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

        count = lkp->lk_count;

        /*
         * Recursive lock if we already hold it exclusively.  Avoid testing
         * lk_lockholder until after testing lk_count.
         */
        if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
                if ((extflags & LK_CANRECURSE) == 0) {
                        if (extflags & LK_NOWAIT)
                                return EBUSY;
                        panic("lockmgr: locking against myself");
                }
                count = atomic_fetchadd_64(&lkp->lk_count, 1) + 1;
                KKASSERT((count & LKC_XMASK) > 1);
                COUNT(td, 1);
                return 0;
        }

        /*
         * Trivially acquire the lock, or block until we can set EXREQ.
         * Set EXREQ2 if EXREQ is already set or the lock is already
         * held exclusively.  EXREQ2 is an aggregation bit to request
         * a wakeup.
         *
         * WARNING! We cannot set EXREQ if the lock is already held
         *          exclusively because it may race another EXREQ
         *          being cleared and granted.  We use the exclusivity
         *          to prevent both EXREQ and UPREQ from being set.
         *
         *          This means that both shared and exclusive requests
         *          have equal priority against a current exclusive holder's
         *          release.  Exclusive requests still have priority over
         *          new shared requests when the lock is already held shared.
         */
        for (;;) {
                /*
                 * Normal trivial case
                 */
                if ((count & (LKC_UPREQ | LKC_EXREQ |
                              LKC_XMASK)) == 0 &&
                    ((count & LKC_SHARED) == 0 ||
                     (count & LKC_SMASK) == 0)) {
                        ncount = (count + 1) & ~LKC_SHARED;
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, ncount)) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                return 0;
                        }
                        continue;
                }

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL)
                                return ENOLCK;
                }
                if (extflags & LK_NOWAIT)
                        return EBUSY;

                /*
                 * Interlock to set EXREQ or EXREQ2
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                if (count & (LKC_EXREQ | LKC_XMASK))
                        ncount = count | LKC_EXREQ2;
                else
                        ncount = count | LKC_EXREQ;
                tsleep_interlock(lkp, pflags);
                if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                        /*
                         * If we successfully transitioned to EXREQ we
                         * can break out, otherwise we had set EXREQ2 and
                         * must block until we are woken up.
                         */
                        if ((count & (LKC_EXREQ | LKC_XMASK)) == 0) {
                                count = ncount;
                                break;
                        }
                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        count = lkp->lk_count;  /* reload */
                        if (lock_test_mode > 0) {
                        }
                        if (extflags & LK_SLEEPFAIL)
                                return ENOLCK;
                }
        }

        /*
         * Once EXREQ has been set, wait for it to be granted
         * We enter the loop with tsleep_interlock() already called.
         */
        for (;;) {
                /*
                 * Waiting for EXREQ to be granted to us.
                 *
                 * NOTE! If we try to trivially get the exclusive lock
                 *       (basically by racing undo_shreq()) and succeed,
                 *       we must still wakeup(lkp) for another exclusive
                 *       lock trying to acquire EXREQ.  Easier to simply
                 *       wait for our own wakeup.
                 */
                if ((count & LKC_EXREQ) == 0) {
                        KKASSERT(count & LKC_XMASK);
                        lkp->lk_lockholder = td;
                        COUNT(td, 1);
                        break;
                }

                /*
                 * Block waiting for our exreq to be granted.
                 * Check cancelation.  NOWAIT was already dealt with.
                 */
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                if (undo_exreq(lkp) == 0) {
                                        lkp->lk_lockholder = LK_KERNTHREAD;
                                        lockmgr_release(lkp, 0);
                                }
                                error = ENOLCK;
                                break;
                        }
                }

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);

                if (lock_test_mode > 0) {
                }

                /*
                 * A tsleep error is uncommon.  If it occurs we have to
                 * undo our EXREQ.  If we are granted the exclusive lock
                 * as we try to undo we have to deal with it.
                 */
                if (error) {
                        if (extflags & LK_SLEEPFAIL) {
                                if (undo_exreq(lkp) == 0) {
                                        lkp->lk_lockholder = LK_KERNTHREAD;
                                        lockmgr_release(lkp, 0);
                                }
                                error = ENOLCK;
                                break;
                        }
                        if (undo_exreq(lkp))
                                break;
                        lkp->lk_lockholder = td;
                        COUNT(td, 1);
                        error = 0;
                        break;
                }

                /*
                 * Reload after sleep, shortcut grant case.
                 * Then set the interlock and loop.
                 */
                count = lkp->lk_count;
                if ((count & LKC_EXREQ) == 0) {
                        KKASSERT(count & LKC_XMASK);
                        lkp->lk_lockholder = td;
                        COUNT(td, 1);
                        break;
                }
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }

        return error;
}
/*
 * Downgrade an exclusive lock to shared.
 *
 * This function always succeeds as long as the caller owns a legal
 * exclusive lock with one reference.  UPREQ and EXREQ is ignored.
 */
int
lockmgr_downgrade(struct lock *lkp, u_int flags)
{
        thread_t td = curthread;
        thread_t otd;
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

        count = lkp->lk_count;

        for (;;) {
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * NOTE: Currently to reduce confusion we only allow
                 *       there to be one exclusive lock count, and panic
                 *       if there are more.
                 */
                if (lkp->lk_lockholder != td || (count & LKC_XMASK) != 1) {
                        panic("lockmgr: not holding exclusive lock: "
                              "%p/%p %016jx", lkp->lk_lockholder, td, count);
                }

                /*
                 * NOTE! Must NULL-out lockholder before releasing the
                 *       exclusive count.
                 *
                 * NOTE! There might be pending shared requests, check
                 *       and wake them up.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                ncount = (count & ~(LKC_XMASK | LKC_EXREQ2)) +
                         ((count & LKC_XMASK) << LKC_SSHIFT);
                ncount |= LKC_SHARED;

                if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                        /*
                         * Wakeup any shared waiters (prior SMASK), or
                         * any exclusive requests that couldn't set EXREQ
                         * because the lock had been held exclusively.
                         */
                        if (count & (LKC_SMASK | LKC_EXREQ2))
                                wakeup(lkp);
                        /* count = ncount; NOT USED */
                        break;
                }
                lkp->lk_lockholder = otd;
        }
        return 0;
}
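
/*
 * Illustrative sketch: downgrading after an exclusive setup phase, per
 * the semantics described above (a single exclusive count held by the
 * caller).  Names are hypothetical.
 *
 *	lockmgr(&demo_lk, LK_EXCLUSIVE);
 *	... initialize or modify the structure ...
 *	lockmgr(&demo_lk, LK_DOWNGRADE);
 *	... continue with read-only access, now shared ...
 *	lockmgr(&demo_lk, LK_RELEASE);
 */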
/*
 * Upgrade a shared lock to exclusive.  If LK_EXCLUPGRADE then guarantee
 * that no other exclusive requester can get in front of us and fail
 * immediately if another upgrade is pending.  If we fail, the shared
 * lock is released.
 *
 * If LK_EXCLUPGRADE is not set and we cannot upgrade because someone
 * else is in front of us, we release the shared lock and acquire the
 * exclusive lock normally.  If a failure occurs, the shared lock is
 * lost.
 */
int
lockmgr_upgrade(struct lock *lkp, u_int flags)
{
        thread_t td = curthread;
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        int error = 0;
        int pflags;
        int timo;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

        count = lkp->lk_count;

        /*
         * If we already hold the lock exclusively this operation
         * succeeds and is a NOP.
         */
        if (count & LKC_XMASK) {
                if (lkp->lk_lockholder == td)
                        return 0;
                panic("lockmgr: upgrade unowned lock");
        }
        if ((count & LKC_SMASK) == 0)
                panic("lockmgr: upgrade unowned lock");

        /*
         * Loop to acquire LKC_UPREQ
         */
        for (;;) {
                /*
                 * If UPREQ is already pending, release the shared lock
                 * and acquire an exclusive lock normally.
                 *
                 * If NOWAIT or EXCLUPGRADE the operation must be atomic,
                 * and this isn't, so we fail.
                 */
                if (count & LKC_UPREQ) {
                        lockmgr_release(lkp, 0);
                        if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE)
                                error = EBUSY;
                        else if (extflags & LK_NOWAIT)
                                error = EBUSY;
                        else
                                error = lockmgr_exclusive(lkp, flags);
                        return error;
                }

                /*
                 * Try to immediately grant the upgrade, handle NOWAIT,
                 * or release the shared lock and simultaneously set UPREQ.
                 */
                if ((count & LKC_SMASK) == LKC_SCOUNT) {
                        /*
                         * We are the only shared holder, grant immediately.
                         */
                        ncount = (count - LKC_SCOUNT + 1) & ~LKC_SHARED;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                lkp->lk_lockholder = td;
                                return 0;
                        }
                } else if (extflags & LK_NOWAIT) {
                        /*
                         * Early EBUSY if an immediate grant is impossible
                         */
                        lockmgr_release(lkp, 0);
                        return EBUSY;
                } else {
                        /*
                         * Multiple shared locks present, request the
                         * upgrade and break to the next loop.
                         */
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        tsleep_interlock(lkp, pflags);
                        ncount = (count - LKC_SCOUNT) | LKC_UPREQ;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                count = ncount;
                                break;
                        }
                }
        }

        /*
         * We have acquired LKC_UPREQ, wait until the upgrade is granted
         * or the tsleep fails.
         *
         * NOWAIT and EXCLUPGRADE have already been handled.  The first
         * tsleep_interlock() has already been associated.
         */
        for (;;) {
                /*
                 * We were granted our upgrade.  No other UPREQ can be
                 * made pending because we are now exclusive.
                 */
                if ((count & LKC_UPREQ) == 0) {
                        KKASSERT((count & LKC_XMASK) == 1);
                        lkp->lk_lockholder = td;
                        break;
                }

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                if (undo_upreq(lkp) == 0) {
                                        lkp->lk_lockholder = LK_KERNTHREAD;
                                        lockmgr_release(lkp, 0);
                                }
                                error = ENOLCK;
                                break;
                        }
                }

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
                if (extflags & LK_SLEEPFAIL) {
                        if (undo_upreq(lkp) == 0) {
                                lkp->lk_lockholder = LK_KERNTHREAD;
                                lockmgr_release(lkp, 0);
                        }
                        if (error == 0)
                                error = ENOLCK;
                        break;
                }

                /*
                 * Reload the lock, short-cut the UPGRANT code before
                 * taking the time to interlock and loop.
                 */
                count = lkp->lk_count;
                if ((count & LKC_UPREQ) == 0) {
                        KKASSERT((count & LKC_XMASK) == 1);
                        lkp->lk_lockholder = td;
                        break;
                }
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }

        return error;
}
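
/*
 * Illustrative sketch: upgrading a shared hold.  Per the description
 * above, LK_EXCLUPGRADE fails (non-zero) rather than queue behind
 * another pending upgrade, and on failure the shared lock has already
 * been dropped; LK_UPGRADE may instead fall back to a normal exclusive
 * acquisition.  Names are hypothetical.
 *
 *	lockmgr(&demo_lk, LK_SHARED);
 *	...
 *	if (lockmgr(&demo_lk, LK_EXCLUPGRADE) == 0) {
 *		... now exclusive; modify and release ...
 *		lockmgr(&demo_lk, LK_RELEASE);
 *	} else {
 *		... upgrade failed and the shared lock was lost;
 *		    re-acquire or back out ...
 *	}
 */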
/*
 * Release a held lock
 *
 * NOTE: When releasing to an unlocked state, we set the SHARED bit
 *       to optimize shared lock requests.
 */
int
lockmgr_release(struct lock *lkp, u_int flags)
{
        thread_t td = curthread;
        thread_t otd;
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

        count = lkp->lk_count;

        /*
         * Release the currently held lock, grant all requests
         * possible.
         *
         * WARNING! lksleep() assumes that LK_RELEASE does not
         *          block.
         */
        for (;;) {
                if ((count & (LKC_SMASK | LKC_XMASK)) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_XMASK) {
                        /*
                         * Release exclusively held lock
                         */
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ | LKC_EXREQ |
                                      LKC_XMASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 * with no UPREQ or EXREQ.  The SHARED
                                 * bit can be set or not without messing
                                 * anything up, so precondition it to
                                 * SHARED (which is the most cpu-optimal).
                                 *
                                 * Wakeup any EXREQ2.  EXREQ cannot be
                                 * set while an exclusive count is present
                                 * so we have to wakeup any EXREQ2 we find.
                                 *
                                 * We could hint the EXREQ2 by leaving
                                 * SHARED unset, but atm I don't see any
                                 * value in that.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = (count - 1);
                                ncount &= ~(LKC_CANCEL | LKC_EXREQ2);
                                ncount |= LKC_SHARED;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        if (count & (LKC_SMASK | LKC_EXREQ2))
                                                wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                        } else if ((count & (LKC_UPREQ | LKC_XMASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.  Transfer count to
                                 * the new owner.
                                 *
                                 * EXREQ cannot be set while an exclusive
                                 * holder exists, so do not clear EXREQ2.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = count & ~LKC_UPREQ;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                        } else if ((count & (LKC_EXREQ | LKC_XMASK)) ==
                                   (LKC_EXREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an exclusive request is present.  We
                                 * automatically grant an exclusive state to
                                 * the owner of the exclusive request,
                                 * transferring our count.
                                 *
                                 * This case virtually never occurs because
                                 * EXREQ is not set while exclusive holders
                                 * exist.  However, it might be set if an
                                 * exclusive request is pending and a
                                 * shared holder upgrades.
                                 *
                                 * Don't bother clearing EXREQ2.  A thread
                                 * waiting to set EXREQ can't do it while
                                 * an exclusive lock is present.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = count & ~LKC_EXREQ;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                        } else {
                                /*
                                 * Multiple exclusive counts, drop by 1.
                                 * Since we are the holder and there is more
                                 * than one count, we can just decrement it.
                                 */
                                atomic_fetchadd_long(&lkp->lk_count, -1);
                                /* count = count - 1 NOT NEEDED */
                                if (lkp->lk_lockholder != LK_KERNTHREAD)
                                        COUNT(td, -1);
                                break;
                        }
                } else {
                        /*
                         * Release shared lock
                         */
                        KKASSERT((count & LKC_SHARED) && (count & LKC_SMASK));
                        if ((count & (LKC_EXREQ | LKC_UPREQ | LKC_SMASK)) ==
                            LKC_SCOUNT) {
                                /*
                                 * Last shared count is being released,
                                 * no exclusive or upgrade request present.
                                 * Generally leave the shared bit set.
                                 * Clear the CANCEL bit.
                                 */
                                ncount = (count - LKC_SCOUNT) & ~LKC_CANCEL;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                        } else if ((count & (LKC_UPREQ | LKC_SMASK)) ==
                                   (LKC_UPREQ | LKC_SCOUNT)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request and transfer the count.
                                 */
                                ncount = (count - LKC_SCOUNT + 1) &
                                         ~(LKC_UPREQ | LKC_CANCEL | LKC_SHARED);
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                        } else if ((count & (LKC_EXREQ | LKC_SMASK)) ==
                                   (LKC_EXREQ | LKC_SCOUNT)) {
                                /*
                                 * Last shared count is being released but
                                 * an exclusive request is present, we
                                 * automatically grant an exclusive state to
                                 * the owner of the request and transfer
                                 * the count.
                                 */
                                ncount = (count - LKC_SCOUNT + 1) &
                                         ~(LKC_EXREQ | LKC_EXREQ2 |
                                           LKC_CANCEL | LKC_SHARED);
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                        } else {
                                /*
                                 * Shared count is greater than 1.  We can
                                 * just use undo_shreq() to clean things up.
                                 * undo_shreq() will also handle races to 0
                                 * after the fact.
                                 */
                                undo_shreq(lkp);
                                COUNT(td, -1);
                                break;
                        }
                }
        }
        return 0;
}
/*
 * Start canceling blocked requesters or later requesters.
 * Only blocked requesters using CANCELABLE can be canceled.
 *
 * This is intended to then allow other requesters (usually the
 * caller) to obtain a non-cancelable lock.
 *
 * Don't waste time issuing a wakeup if nobody is pending.
 */
int
lockmgr_cancel_beg(struct lock *lkp, u_int flags)
{
        uint64_t count;

        count = lkp->lk_count;
        for (;;) {
                KKASSERT((count & LKC_CANCEL) == 0);    /* disallowed case */

                /* issue w/lock held */
                KKASSERT((count & (LKC_XMASK | LKC_SMASK)) != 0);

                if (!atomic_fcmpset_64(&lkp->lk_count,
                                       &count, count | LKC_CANCEL)) {
                        continue;
                }
                /* count |= LKC_CANCEL; NOT USED */

                /*
                 * Wakeup any waiters.
                 *
                 * NOTE: EXREQ2 only matters when EXREQ is set, so don't
                 *       bother checking EXREQ2.
                 */
                if (count & (LKC_EXREQ | LKC_SMASK | LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;
        }
        return 0;
}

/*
 * End our cancel request (typically after we have acquired
 * the lock ourselves).
 */
int
lockmgr_cancel_end(struct lock *lkp, u_int flags)
{
        atomic_clear_long(&lkp->lk_count, LKC_CANCEL);

        return 0;
}
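
/*
 * Illustrative sketch of the cancel protocol described above: the
 * current holder flags the lock, which causes blocked or later
 * requesters that used LK_CANCELABLE to fail instead of waiting, and
 * clears the flag once it has reacquired the lock for itself.  Names
 * are hypothetical.
 *
 *	lockmgr(&demo_lk, LK_CANCEL_BEG);	(issued while holding the lock)
 *	lockmgr(&demo_lk, LK_RELEASE);
 *	lockmgr(&demo_lk, LK_EXCLUSIVE);	(non-cancelable, wins the race)
 *	lockmgr(&demo_lk, LK_CANCEL_END);
 *
 * Meanwhile a competing thread using
 *
 *	error = lockmgr(&demo_lk, LK_EXCLUSIVE | LK_CANCELABLE);
 *
 * sees a non-zero error instead of blocking indefinitely.
 */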
/*
 * Backout SCOUNT from a failed shared lock attempt and handle any race
 * to 0.  This function is also used by the release code for the less
 * optimal race to 0 case.
 *
 * WARNING! Since we are unconditionally decrementing LKC_SCOUNT, it is
 *          possible for the lock to get into a LKC_SHARED + ZERO SCOUNT
 *          situation.  A shared request can block with a ZERO SCOUNT if
 *          EXREQ or UPREQ is pending in this situation.  Be sure to always
 *          issue a wakeup() in this situation if we are unable to
 *          transition to an exclusive lock, to handle the race.
 */
static void
undo_shreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;

        count = atomic_fetchadd_64(&lkp->lk_count, -LKC_SCOUNT) - LKC_SCOUNT;
        while ((count & (LKC_EXREQ | LKC_UPREQ | LKC_CANCEL)) &&
               (count & (LKC_SMASK | LKC_XMASK)) == 0) {
                /*
                 * Note that UPREQ must have priority over EXREQ, and EXREQ
                 * over CANCEL, so if the atomic op fails we have to loop up.
                 */
                if (count & LKC_UPREQ) {
                        ncount = (count + 1) & ~(LKC_UPREQ | LKC_CANCEL |
                                                 LKC_SHARED);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                        continue;
                }
                if (count & LKC_EXREQ) {
                        ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2 |
                                                 LKC_CANCEL | LKC_SHARED);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                        continue;
                }
                if (count & LKC_CANCEL) {
                        ncount = count & ~LKC_CANCEL;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                        continue;
                }
        }
}
/*
 * Undo an exclusive request.  Returns EBUSY if we were able to undo the
 * request, and 0 if the request was granted before we could undo it.
 * When 0 is returned, the lock state has not been modified.  The caller
 * is responsible for setting the lockholder to curthread.
 */
static int
undo_exreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;
        int error = 0;

        count = lkp->lk_count;

        for (;;) {
                if ((count & LKC_EXREQ) == 0) {
                        /*
                         * EXREQ was granted.  We own the exclusive lock.
                         */
                        break;
                }
                if (count & LKC_XMASK) {
                        /*
                         * Clear the EXREQ we still own.  Only wakeup on
                         * EXREQ2 if no UPREQ.  There are still exclusive
                         * holders so do not wake up any shared locks or
                         * any UPREQ.
                         *
                         * If there is an UPREQ it will issue a wakeup()
                         * for any EXREQ wait loops, so we can clear EXREQ2
                         * now.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if ((count & (LKC_EXREQ2 | LKC_UPREQ)) ==
                                    LKC_EXREQ2) {
                                        wakeup(lkp);
                                }
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                } else if (count & LKC_UPREQ) {
                        /*
                         * Clear the EXREQ we still own.  We cannot wakeup any
                         * shared or exclusive waiters because there is an
                         * uprequest pending (that we do not handle here).
                         *
                         * If there is an UPREQ it will issue a wakeup()
                         * for any EXREQ wait loops, so we can clear EXREQ2
                         * now.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                error = EBUSY;
                                break;
                        }
                } else if ((count & LKC_SHARED) && (count & LKC_SMASK)) {
                        /*
                         * No UPREQ, lock not held exclusively, but the lock
                         * is held shared.  Clear EXREQ, wakeup anyone trying
                         * to get the EXREQ bit (they have to set it
                         * themselves, EXREQ2 is an aggregation).
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if (count & LKC_EXREQ2)
                                        wakeup(lkp);
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                } else {
                        /*
                         * No UPREQ, lock not held exclusively or shared.
                         * Grant the EXREQ and wakeup anyone waiting on
                         * EXREQ2.
                         */
                        ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if (count & LKC_EXREQ2)
                                        wakeup(lkp);
                                /* count = ncount; NOT USED */
                                /* we are granting, error == 0 */
                                break;
                        }
                }
        }
        return error;
}
/*
 * Undo an upgrade request.  Returns EBUSY if we were able to undo the
 * request, and 0 if the request was granted before we could undo it.
 * When 0 is returned, the lock state has not been modified.  The caller
 * is responsible for setting the lockholder to curthread.
 */
static int
undo_upreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;
        int error = 0;

        count = lkp->lk_count;

        for (;;) {
                if ((count & LKC_UPREQ) == 0) {
                        /*
                         * UPREQ was granted.  We own the exclusive lock.
                         */
                        break;
                }
                if (count & LKC_XMASK) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        if (atomic_fcmpset_64(&lkp->lk_count, &count,
                                              count & ~LKC_UPREQ)) {
                                error = EBUSY;
                                /* count &= ~LKC_UPREQ; NOT USED */
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  Grant the exclusive
                         * request and wake it up.
                         */
                        ncount = (count + 1);
                        ncount &= ~(LKC_EXREQ | LKC_EXREQ2 | LKC_UPREQ);

                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        ncount = count & ~LKC_UPREQ;
                        if (count & LKC_SMASK)
                                ncount |= LKC_SHARED;

                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if ((count & LKC_SHARED) == 0 &&
                                    (ncount & LKC_SHARED)) {
                                        wakeup(lkp);
                                }
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                }
        }
        return error;
}
/*
 * Transfer ownership of an exclusively held lock to the kernel so that
 * any thread may release it.
 */
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                        ("lockmgr_kernproc: lock not owned by curthread %p: %p",
                         td, lp->lk_lockholder));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}
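
/*
 * Illustrative sketch: handing an exclusively held lock to the kernel
 * so that a different thread (e.g. an I/O completion path) may release
 * it later, as lockmgr_kernproc() above permits.  Names are
 * hypothetical.
 *
 *	lockmgr(&demo_lk, LK_EXCLUSIVE);
 *	lockmgr_kernproc(&demo_lk);
 *	... queue the object to another thread ...
 *
 *	(later, in the other thread)
 *	lockmgr(&demo_lk, LK_RELEASE);
 */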
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = NULL;
}
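
/*
 * Illustrative sketch: initializing a lock whose sleeps use a default
 * timeout, assuming LK_TIMELOCK is part of LK_EXTFLG_MASK so the flag
 * persists in lk_flags (hypothetical lock name and timeout value).
 *
 *	lockinit(&demo_lk, "demolk", hz * 5, LK_TIMELOCK);
 *
 * Subsequent lockmgr() sleeps on this lock are then bounded by lk_timo
 * instead of waiting forever.
 */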
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}
/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        uint64_t count __unused;

        count = lkp->lk_count;
        KKASSERT((count & (LKC_EXREQ | LKC_UPREQ)) == 0 &&
                 ((count & LKC_SHARED) || (count & LKC_SMASK) == 0));
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        uint64_t count;
        int lock_type = 0;

        count = lkp->lk_count;

        if (count & (LKC_XMASK | LKC_SMASK | LKC_EXREQ | LKC_UPREQ)) {
                if (count & LKC_XMASK) {
                        if (td == NULL || lkp->lk_lockholder == td)
                                lock_type = LK_EXCLUSIVE;
                        else
                                lock_type = LK_EXCLOTHER;
                } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
                        lock_type = LK_SHARED;
                }
        }
        return lock_type;
}
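
/*
 * Illustrative sketch: asserting ownership with lockstatus(), e.g. at
 * the top of a function that requires the caller to hold the lock
 * exclusively.  The lock name is hypothetical.
 *
 *	KKASSERT(lockstatus(&demo_lk, curthread) == LK_EXCLUSIVE);
 */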
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        uint64_t count;

        count = lkp->lk_count;

        if (count & LKC_XMASK)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_SMASK) != 0);
}
/*
 * Determine the number of holders of a lock.
 *
 * REMOVED - Cannot be used due to our use of atomic_fetchadd_64()
 *           for shared locks.  Caller can only test if the lock has
 *           a count or not using lockinuse(lk) (sys/lock.h)
 */
int
lockcount(struct lock *lkp)
{
        panic("lockcount cannot be used");
}

int
lockcountnb(struct lock *lkp)
{
        panic("lockcount cannot be used");
}
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        uint64_t count;

        count = lkp->lk_count;

        if (td && td != LK_KERNTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_XMASK) {
                kprintf(" lock type %s: EXCLUS (count %016jx) by td %p pid %d",
                        lkp->lk_wmesg, (intmax_t)count, td,
                        p ? p->p_pid : -99);
        } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
                kprintf(" lock type %s: SHARED (count %016jx)",
                        lkp->lk_wmesg, (intmax_t)count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if ((count & (LKC_EXREQ | LKC_UPREQ)) ||
            ((count & LKC_XMASK) && (count & LKC_SMASK)))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
#ifdef DEBUG_CANCEL_LOCKS

static int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
        int error = 0;

        if (req->newptr) {
                lockmgr(&cancel_lk, LK_EXCLUSIVE);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_CANCEL_BEG);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_RELEASE);
        }
        SYSCTL_OUT(req, &error, sizeof(error));

        return 0;
}

static int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
        int error;

        error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
        if (error == 0)
                lockmgr(&cancel_lk, LK_RELEASE);
        SYSCTL_OUT(req, &error, sizeof(error));
        kprintf("test %d\n", error);

        return 0;
}

#endif