/*
 * The Regents of the University of California.  All rights reserved.
 * John S. Dyson.  All rights reserved.
 * Copyright (C) 2013-2017
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>.  Extensively rewritten.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/indefinite2.h>
static void undo_shreq(struct lock *lkp);
static int undo_upreq(struct lock *lkp);
static int undo_exreq(struct lock *lkp);
#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_test, "I", "test cancelable locks");

#endif
__read_frequently int lock_test_mode;
SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
           &lock_test_mode, 0, "");
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x) do { } while (0)
#endif
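/*
 * Illustrative usage sketch (generic example, not lifted from a specific
 * caller).  Callers normally go through the lockmgr() wrapper in
 * <sys/lock.h>, which dispatches on LK_TYPE_MASK to the functions below:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, "examplk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE);	(blocks until granted)
 *	 ...critical section...
 *	lockmgr(&lk, LK_RELEASE);
 *	lockuninit(&lk);
 */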
/*
 * Helper, assert basic conditions
 */
static __inline void
_lockmgr_assert(struct lock *lkp, u_int flags)
{
        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu)
        {
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
        }
}
/*
 * Acquire a shared lock
 */
int
lockmgr_shared(struct lock *lkp, u_int flags)
{
        uint32_t extflags;
        thread_t td;
        uint64_t count;
        int error;
        int pflags;
        int timo;
        int didloop;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        count = lkp->lk_count;
        cpu_ccfence();

        /*
         * If the caller already holds the lock exclusively then
         * we silently obtain another count on the exclusive lock.
         * Avoid accessing lk_lockholder until testing exclusivity.
         *
         * WARNING!  The old FreeBSD behavior was to downgrade,
         *           but this creates a problem when recursions
         *           return to the caller and the caller expects
         *           its original exclusive lock to remain exclusively
         *           locked.
         */
        if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
                KKASSERT(lkp->lk_count & LKC_XMASK);
                if ((extflags & LK_CANRECURSE) == 0) {
                        if (extflags & LK_NOWAIT)
                                return EBUSY;
                        panic("lockmgr: locking against myself");
                }
                atomic_add_64(&lkp->lk_count, 1);
                COUNT(td, 1);
                return 0;
        }

        /*
         * Unless TDF_DEADLKTREAT is set, we cannot add LKC_SCOUNT while
         * SHARED is set and either EXREQ or UPREQ are set.
         *
         * NOTE: In the race-to-0 case (see undo_shreq()), we could
         *       theoretically work the SMASK == 0 case here.
         */
        if ((td->td_flags & TDF_DEADLKTREAT) == 0) {
                while ((count & LKC_SHARED) &&
                       (count & (LKC_EXREQ | LKC_UPREQ))) {
                        /*
                         * Immediate failure conditions
                         */
                        if (extflags & LK_CANCELABLE) {
                                if (count & LKC_CANCEL)
                                        return ENOLCK;
                        }
                        if (extflags & LK_NOWAIT)
                                return EBUSY;

                        /*
                         * Interlocked sleep, then retest.
                         */
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                        tsleep_interlock(lkp, pflags);
                        count = atomic_fetchadd_long(&lkp->lk_count, 0);

                        if ((count & LKC_SHARED) &&
                            (count & (LKC_EXREQ | LKC_UPREQ))) {
                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error)
                                        return error;
                                count = lkp->lk_count;
                                cpu_ccfence();
                        }
                }
        }

        /*
         * Bump the SCOUNT field.  The shared lock is granted only once
         * the SHARED flag gets set.  If it is already set, we are done.
         *
         * (Racing an EXREQ or UPREQ operation is ok here, we already did
         * our duty above).
         */
        count = atomic_fetchadd_64(&lkp->lk_count, LKC_SCOUNT) + LKC_SCOUNT;
        error = 0;
        didloop = 0;

        for (;;) {
                /*
                 * We may be able to grant ourselves the bit trivially.
                 * We're done once the SHARED bit is granted.
                 */
                if ((count & (LKC_XMASK | LKC_EXREQ |
                              LKC_UPREQ | LKC_SHARED)) == 0) {
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, count | LKC_SHARED)) {
                                /* count |= LKC_SHARED; NOT USED */
                                break;
                        }
                        continue;
                }
                if ((td->td_flags & TDF_DEADLKTREAT) &&
                    (count & (LKC_XMASK | LKC_SHARED)) == 0) {
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, count | LKC_SHARED)) {
                                /* count |= LKC_SHARED; NOT USED */
                                break;
                        }
                        continue;
                }
                if (count & LKC_SHARED)
                        break;

                /*
                 * Slow path, handle failure conditions and sleep.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                undo_shreq(lkp);
                                error = ENOLCK;
                                break;
                        }
                }
                if (extflags & LK_NOWAIT) {
                        undo_shreq(lkp);
                        error = EBUSY;
                        break;
                }

                /*
                 * Interlocked after the first loop.
                 */
                if (didloop) {
                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                undo_shreq(lkp);
                                break;
                        }
                        if (error) {
                                undo_shreq(lkp);
                                break;
                        }
                }
                didloop = 1;

                /*
                 * Reload, shortcut grant case, then loop interlock
                 * and loop.
                 */
                count = lkp->lk_count;
                if (count & LKC_SHARED)
                        break;
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }
        if (error == 0)
                COUNT(td, 1);

        return error;
}
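/*
 * Example (sketch): non-blocking shared acquisition.  LK_NOWAIT makes
 * lockmgr_shared() return EBUSY instead of sleeping when an exclusive
 * holder or a pending EXREQ/UPREQ would otherwise block us:
 *
 *	if (lockmgr(&lk, LK_SHARED | LK_NOWAIT) == 0) {
 *		 ...read-side work...
 *		lockmgr(&lk, LK_RELEASE);
 *	} else {
 *		 ...fall back, the lock was busy...
 *	}
 */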
/*
 * Acquire an exclusive lock
 */
int
lockmgr_exclusive(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t td;
        int error;
        int pflags;
        int timo;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        error = 0;
        count = lkp->lk_count;
        cpu_ccfence();

        /*
         * Recursive lock if we already hold it exclusively.  Avoid testing
         * lk_lockholder until after testing lk_count.
         */
        if ((count & LKC_XMASK) && lkp->lk_lockholder == td) {
                if ((extflags & LK_CANRECURSE) == 0) {
                        if (extflags & LK_NOWAIT)
                                return EBUSY;
                        panic("lockmgr: locking against myself");
                }
                count = atomic_fetchadd_64(&lkp->lk_count, 1) + 1;
                KKASSERT((count & LKC_XMASK) > 1);
                COUNT(td, 1);
                return 0;
        }

        /*
         * Trivially acquire the lock, or block until we can set EXREQ.
         * Set EXREQ2 if EXREQ is already set or the lock is already
         * held exclusively.  EXREQ2 is an aggregation bit to request
         * a wakeup.
         *
         * WARNING! We cannot set EXREQ if the lock is already held
         *          exclusively because it may race another EXREQ
         *          being cleared and granted.  We use the exclusivity
         *          to prevent both EXREQ and UPREQ from being set.
         *
         *          This means that both shared and exclusive requests
         *          have equal priority against a current exclusive holder's
         *          release.  Exclusive requests still have priority over
         *          new shared requests when the lock is already held shared.
         */
        for (;;) {
                /*
                 * Normal trivial case
                 */
                if ((count & (LKC_UPREQ | LKC_EXREQ |
                              LKC_XMASK)) == 0 &&
                    ((count & LKC_SHARED) == 0 ||
                     (count & LKC_SMASK) == 0)) {
                        ncount = (count + 1) & ~LKC_SHARED;
                        if (atomic_fcmpset_64(&lkp->lk_count,
                                              &count, ncount)) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                return 0;
                        }
                        continue;
                }

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL)
                                return ENOLCK;
                }
                if (extflags & LK_NOWAIT)
                        return EBUSY;

                /*
                 * Interlock to set EXREQ or EXREQ2
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                if (count & (LKC_EXREQ | LKC_XMASK))
                        ncount = count | LKC_EXREQ2;
                else
                        ncount = count | LKC_EXREQ;
                tsleep_interlock(lkp, pflags);
                if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                        /*
                         * If we successfully transitioned to EXREQ we
                         * can break out, otherwise we had set EXREQ2 and
                         * must sleep until we can try again.
                         */
                        if ((count & (LKC_EXREQ | LKC_XMASK)) == 0) {
                                count = ncount;
                                break;
                        }

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        count = lkp->lk_count;  /* reload */
                        cpu_ccfence();

                        if (lock_test_mode > 0) {
                                --lock_test_mode;
                                print_backtrace(8);
                        }
                        if (error)
                                return error;
                        if (extflags & LK_SLEEPFAIL)
                                return ENOLCK;
                }
                /* retry */
        }

        /*
         * Once EXREQ has been set, wait for it to be granted.
         * We enter the loop with tsleep_interlock() already called.
         */
        for (;;) {
                /*
                 * Waiting for EXREQ to be granted to us.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 *
                 * NOTE! If we try to trivially get the exclusive lock
                 *       (basically by racing undo_shreq()) and succeed,
                 *       we must still wakeup(lkp) for another exclusive
                 *       lock trying to acquire EXREQ.  Easier to simply
                 *       wait for our own wakeup.
                 */
                if ((count & LKC_EXREQ) == 0) {
                        KKASSERT(count & LKC_XMASK);
                        lkp->lk_lockholder = td;
                        break;
                }

                /*
                 * Block waiting for our exreq to be granted.
                 * Check cancelation.  NOWAIT was already dealt with.
                 */
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                if (undo_exreq(lkp) == 0) {
                                        lkp->lk_lockholder = LK_KERNTHREAD;
                                        lockmgr_release(lkp, 0);
                                }
                                error = ENOLCK;
                                break;
                        }
                }

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);

                if (lock_test_mode > 0) {
                        --lock_test_mode;
                        print_backtrace(8);
                }

                /*
                 * A tsleep error is uncommon.  If it occurs we have to
                 * undo our EXREQ.  If we are granted the exclusive lock
                 * as we try to undo we have to deal with it.
                 */
                if (extflags & LK_SLEEPFAIL) {
                        if (undo_exreq(lkp) == 0) {
                                lkp->lk_lockholder = LK_KERNTHREAD;
                                lockmgr_release(lkp, 0);
                        }
                        if (error == 0)
                                error = ENOLCK;
                        break;
                }
                if (error) {
                        if (undo_exreq(lkp))
                                break;
                        lkp->lk_lockholder = td;
                        error = 0;
                        break;
                }

                /*
                 * Reload after sleep, shortcut grant case.
                 * Then set the interlock and loop.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 */
                count = lkp->lk_count;
                cpu_ccfence();
                if ((count & LKC_EXREQ) == 0) {
                        KKASSERT(count & LKC_XMASK);
                        lkp->lk_lockholder = td;
                        break;
                }
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }
        if (error == 0)
                COUNT(td, 1);

        return error;
}
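/*
 * Example (sketch): recursive exclusive locking.  Recursion must be
 * enabled explicitly, either per-call or via the lockinit() flags;
 * otherwise re-locking from the owning thread panics ("locking against
 * myself"):
 *
 *	lockinit(&lk, "recurslk", 0, LK_CANRECURSE);
 *	lockmgr(&lk, LK_EXCLUSIVE);
 *	lockmgr(&lk, LK_EXCLUSIVE);	(adds a second exclusive count)
 *	lockmgr(&lk, LK_RELEASE);
 *	lockmgr(&lk, LK_RELEASE);	(last release wakes any waiters)
 */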
/*
 * Downgrade an exclusive lock to shared.
 *
 * This function always succeeds as long as the caller owns a legal
 * exclusive lock with one reference.  UPREQ and EXREQ are ignored.
 */
int
lockmgr_downgrade(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t otd;
        thread_t td;

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;
        count = lkp->lk_count;

        for (;;) {
                cpu_ccfence();

                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * NOTE: Currently to reduce confusion we only allow
                 *       there to be one exclusive lock count, and panic
                 *       if there are more.
                 */
                if (lkp->lk_lockholder != td || (count & LKC_XMASK) != 1) {
                        panic("lockmgr: not holding exclusive lock: "
                              "%p/%p %016jx", lkp->lk_lockholder, td, count);
                }

                /*
                 * NOTE! Must NULL-out lockholder before releasing the
                 *       exclusive count.
                 *
                 * NOTE! There might be pending shared requests, check
                 *       and wake them up.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                ncount = (count & ~(LKC_XMASK | LKC_EXREQ2)) +
                         ((count & LKC_XMASK) << LKC_SSHIFT);
                ncount |= LKC_SHARED;

                if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                        /*
                         * Wakeup any shared waiters (prior SMASK), or
                         * any exclusive requests that couldn't set EXREQ
                         * because the lock had been held exclusively.
                         */
                        if (count & (LKC_SMASK | LKC_EXREQ2))
                                wakeup(lkp);
                        /* count = ncount; NOT USED */
                        break;
                }
                lkp->lk_lockholder = otd;
                /* retry */
        }
        return 0;
}
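/*
 * Example (sketch): acquire exclusively to modify, then downgrade to
 * shared so other readers can proceed while we keep reading:
 *
 *	lockmgr(&lk, LK_EXCLUSIVE);
 *	 ...modify...
 *	lockmgr(&lk, LK_DOWNGRADE);	(we are now a shared holder)
 *	 ...read...
 *	lockmgr(&lk, LK_RELEASE);
 */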
/*
 * Upgrade a shared lock to exclusive.  If LK_EXCLUPGRADE then guarantee
 * that no other exclusive requester can get in front of us and fail
 * immediately if another upgrade is pending.  If we fail, the shared
 * lock is released.
 *
 * If LK_EXCLUPGRADE is not set and we cannot upgrade because someone
 * else is in front of us, we release the shared lock and acquire the
 * exclusive lock normally.  If a failure occurs, the shared lock is
 * released.
 *
 * The way this works is that if we cannot instantly upgrade the
 * shared lock due to various conditions, but we can acquire UPREQ,
 * we then set UPREQ and wait for the thread blocking us to grant
 * our upgrade.  The other thread grants our upgrade by incrementing
 * the excl count (to 1) and clearing UPREQ, but it doesn't know 'who'
 * requested the upgrade so it can't set lk_lockholder.  Our thread notices
 * that LKC_UPREQ is now clear and finishes up by setting lk_lockholder.
 */
int
lockmgr_upgrade(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t td;
        int error;
        int pflags;
        int timo;

        _lockmgr_assert(lkp, flags);
        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;
        error = 0;
        count = lkp->lk_count;
        cpu_ccfence();

        /*
         * If we already hold the lock exclusively this operation
         * succeeds and is a NOP.
         */
        if (count & LKC_XMASK) {
                if (lkp->lk_lockholder == td)
                        return 0;
                panic("lockmgr: upgrade unowned lock");
        }
        if ((count & LKC_SMASK) == 0)
                panic("lockmgr: upgrade unowned lock");

        /*
         * Loop to acquire LKC_UPREQ
         */
        for (;;) {
                /*
                 * If UPREQ is already pending, release the shared lock
                 * and acquire an exclusive lock normally.
                 *
                 * If NOWAIT or EXCLUPGRADE the operation must be atomic,
                 * and this isn't, so we fail.
                 */
                if (count & LKC_UPREQ) {
                        lockmgr_release(lkp, 0);
                        if ((flags & LK_TYPE_MASK) == LK_EXCLUPGRADE)
                                error = EBUSY;
                        else if (extflags & LK_NOWAIT)
                                error = EBUSY;
                        else
                                error = lockmgr_exclusive(lkp, flags);
                        return error;
                }

                /*
                 * Try to immediately grant the upgrade, handle NOWAIT,
                 * or release the shared lock and simultaneously set UPREQ.
                 */
                if ((count & LKC_SMASK) == LKC_SCOUNT) {
                        /*
                         * We are the only shared holder, grant immediately.
                         */
                        ncount = (count - LKC_SCOUNT + 1) & ~LKC_SHARED;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                lkp->lk_lockholder = td;
                                return 0;
                        }
                } else if (extflags & LK_NOWAIT) {
                        /*
                         * Early EBUSY if an immediate grant is impossible
                         */
                        lockmgr_release(lkp, 0);
                        return EBUSY;
                } else {
                        /*
                         * Multiple shared locks present, request the
                         * upgrade and break to the next loop.
                         */
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        tsleep_interlock(lkp, pflags);
                        ncount = (count - LKC_SCOUNT) | LKC_UPREQ;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                count = ncount;
                                break;
                        }
                }
                /* retry */
        }

        /*
         * We have acquired LKC_UPREQ, wait until the upgrade is granted
         * or the tsleep fails.
         *
         * NOWAIT and EXCLUPGRADE have already been handled.  The first
         * tsleep_interlock() has already been associated.
         */
        for (;;) {
                cpu_ccfence();

                /*
                 * We were granted our upgrade.  No other UPREQ can be
                 * made pending because we are now exclusive.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 */
                if ((count & LKC_UPREQ) == 0) {
                        KKASSERT((count & LKC_XMASK) == 1);
                        lkp->lk_lockholder = td;
                        break;
                }

                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                if (undo_upreq(lkp) == 0) {
                                        lkp->lk_lockholder = LK_KERNTHREAD;
                                        lockmgr_release(lkp, 0);
                                }
                                error = ENOLCK;
                                break;
                        }
                }

                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                error = tsleep(lkp, pflags | PINTERLOCKED, lkp->lk_wmesg, timo);
                if (extflags & LK_SLEEPFAIL) {
                        if (undo_upreq(lkp) == 0) {
                                lkp->lk_lockholder = LK_KERNTHREAD;
                                lockmgr_release(lkp, 0);
                        }
                        if (error == 0)
                                error = ENOLCK;
                        break;
                }
                if (error) {
                        if (undo_upreq(lkp))
                                break;
                        lkp->lk_lockholder = td;
                        error = 0;
                        break;
                }

                /*
                 * Reload the lock, short-cut the UPGRANT code before
                 * taking the time to interlock and loop.
                 *
                 * The granting thread will handle the count for us, but we
                 * still have to set lk_lockholder.
                 */
                count = lkp->lk_count;
                if ((count & LKC_UPREQ) == 0) {
                        KKASSERT((count & LKC_XMASK) == 1);
                        lkp->lk_lockholder = td;
                        break;
                }
                tsleep_interlock(lkp, pflags);
                count = atomic_fetchadd_64(&lkp->lk_count, 0);
        }
        return error;
}
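/*
 * Example (sketch): upgrading a shared hold.  LK_UPGRADE falls back to
 * releasing the shared lock and acquiring exclusively if another upgrade
 * is already pending; LK_EXCLUPGRADE instead fails immediately with
 * EBUSY in that case.  Either way, a failure means the shared lock was
 * released:
 *
 *	lockmgr(&lk, LK_SHARED);
 *	if (lockmgr(&lk, LK_EXCLUPGRADE) == 0) {
 *		 ...exclusive access, no other upgrader got in front...
 *		lockmgr(&lk, LK_RELEASE);
 *	} else {
 *		 ...the shared lock was lost, redo the lookup...
 *	}
 */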
/*
 * Release a held lock
 *
 * NOTE: When releasing to an unlocked state, we set the SHARED bit
 *       to optimize shared lock requests.
 */
int
lockmgr_release(struct lock *lkp, u_int flags)
{
        uint64_t count;
        uint64_t ncount;
        uint32_t extflags;
        thread_t otd;
        thread_t td;

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        count = lkp->lk_count;
        cpu_ccfence();

        for (;;) {
                /*
                 * Release the currently held lock, grant all requests
                 * possible.
                 *
                 * WARNING! lksleep() assumes that LK_RELEASE does not
                 *          block.
                 */
                if ((count & (LKC_SMASK | LKC_XMASK)) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_XMASK) {
                        /*
                         * Release exclusively held lock
                         */
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ | LKC_EXREQ |
                                      LKC_XMASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 * with no UPREQ or EXREQ.  The SHARED
                                 * bit can be set or not without messing
                                 * anything up, so precondition it to
                                 * SHARED (which is the most cpu-optimal).
                                 *
                                 * Wakeup any EXREQ2.  EXREQ cannot be
                                 * set while an exclusive count is present
                                 * so we have to wakeup any EXREQ2 we find.
                                 *
                                 * We could hint the EXREQ2 by leaving
                                 * SHARED unset, but atm I don't see any
                                 * benefit.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = (count - 1);
                                ncount &= ~(LKC_CANCEL | LKC_EXREQ2);
                                ncount |= LKC_SHARED;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        if (count & (LKC_SMASK | LKC_EXREQ2))
                                                wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                                /* retry */
                        } else if ((count & (LKC_UPREQ | LKC_XMASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.  Transfer count to
                                 * the new exclusive holder.
                                 *
                                 * The owner of LKC_UPREQ is still responsible
                                 * for setting lk_lockholder.
                                 *
                                 * EXREQ cannot be set while an exclusive
                                 * holder exists, so do not clear EXREQ2.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = count & ~LKC_UPREQ;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                                /* retry */
                        } else if ((count & (LKC_EXREQ | LKC_XMASK)) ==
                                   (LKC_EXREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an exclusive request is present.  We
                                 * automatically grant an exclusive state to
                                 * the owner of the exclusive request,
                                 * transferring our count.
                                 *
                                 * This case virtually never occurs because
                                 * EXREQ is not set while exclusive holders
                                 * exist.  However, it might be set if an
                                 * exclusive request is pending and a
                                 * shared holder upgrades.
                                 *
                                 * Don't bother clearing EXREQ2.  A thread
                                 * waiting to set EXREQ can't do it while
                                 * an exclusive lock is present.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                ncount = count & ~LKC_EXREQ;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        if (otd != LK_KERNTHREAD)
                                                COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                lkp->lk_lockholder = otd;
                                /* retry */
                        } else {
                                /*
                                 * Multiple exclusive counts, drop by 1.
                                 * Since we are the holder and there is more
                                 * than one count, we can just decrement it.
                                 */
                                atomic_fetchadd_long(&lkp->lk_count, -1);
                                /* count = count - 1  NOT NEEDED */
                                if (lkp->lk_lockholder != LK_KERNTHREAD)
                                        COUNT(td, -1);
                                break;
                        }
                        /* retry */
                } else {
                        /*
                         * Release shared lock
                         */
                        KKASSERT((count & LKC_SHARED) && (count & LKC_SMASK));
                        if ((count & (LKC_EXREQ | LKC_UPREQ | LKC_SMASK)) ==
                            LKC_SCOUNT) {
                                /*
                                 * Last shared count is being released,
                                 * no exclusive or upgrade request present.
                                 * Generally leave the shared bit set.
                                 * Clear the CANCEL bit.
                                 */
                                ncount = (count - LKC_SCOUNT) & ~LKC_CANCEL;
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                /* retry */
                        } else if ((count & (LKC_UPREQ | LKC_SMASK)) ==
                                   (LKC_UPREQ | LKC_SCOUNT)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request and transfer the count.
                                 *
                                 * The owner of the upgrade request is still
                                 * responsible for setting lk_lockholder.
                                 */
                                ncount = (count - LKC_SCOUNT + 1) &
                                         ~(LKC_UPREQ | LKC_CANCEL | LKC_SHARED);
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                /* retry */
                        } else if ((count & (LKC_EXREQ | LKC_SMASK)) ==
                                   (LKC_EXREQ | LKC_SCOUNT)) {
                                /*
                                 * Last shared count is being released but
                                 * an exclusive request is present, we
                                 * automatically grant an exclusive state to
                                 * the owner of the request and transfer
                                 * the count.
                                 */
                                ncount = (count - LKC_SCOUNT + 1) &
                                         ~(LKC_EXREQ | LKC_EXREQ2 |
                                           LKC_CANCEL | LKC_SHARED);
                                if (atomic_fcmpset_64(&lkp->lk_count,
                                                      &count, ncount)) {
                                        wakeup(lkp);
                                        COUNT(td, -1);
                                        /* count = ncount; NOT USED */
                                        break;
                                }
                                /* retry */
                        } else {
                                /*
                                 * Shared count is greater than 1.  We can
                                 * just use undo_shreq() to clean things up.
                                 * undo_shreq() will also handle races to 0
                                 * after the fact.
                                 */
                                undo_shreq(lkp);
                                COUNT(td, -1);
                                break;
                        }
                }
                /* retry */
        }
        return 0;
}
/*
 * Start canceling blocked or future requesters.  Only blocked/future
 * requesters who pass the CANCELABLE flag can be canceled.
 *
 * This is intended to then allow other requesters (usually the
 * caller) to obtain a non-cancelable lock.
 *
 * Don't waste time issuing a wakeup if nobody is pending.
 */
int
lockmgr_cancel_beg(struct lock *lkp, u_int flags)
{
        uint64_t count;

        count = lkp->lk_count;
        for (;;) {
                cpu_ccfence();

                KKASSERT((count & LKC_CANCEL) == 0);    /* disallowed case */

                /* issue w/lock held */
                KKASSERT((count & (LKC_XMASK | LKC_SMASK)) != 0);

                if (!atomic_fcmpset_64(&lkp->lk_count,
                                       &count, count | LKC_CANCEL)) {
                        continue;
                }
                /* count |= LKC_CANCEL; NOT USED */

                /*
                 * Wakeup any waiters.
                 *
                 * NOTE: EXREQ2 must be checked in addition to standard
                 *       wait sources, it is possible for EXREQ2 to be
                 *       set when EXREQ is clear.
                 */
                if (count & (LKC_EXREQ | LKC_EXREQ2 | LKC_SMASK | LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;
        }
        return 0;
}
/*
 * End our cancel request (typically after we have acquired
 * the lock ourselves).
 */
int
lockmgr_cancel_end(struct lock *lkp, u_int flags)
{
        atomic_clear_long(&lkp->lk_count, LKC_CANCEL);

        return 0;
}
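/*
 * Example (sketch): cooperative cancelation.  A holder can flush out
 * blocked requesters that opted in with LK_CANCELABLE; those requests
 * fail with ENOLCK instead of waiting forever.  LK_CANCEL_END (or a
 * full release of the lock) clears the cancel state.  The debug sysctls
 * at the bottom of this file exercise the same mechanism:
 *
 *	(thread A, already holding lk)
 *	lockmgr(&lk, LK_CANCEL_BEG);
 *	 ...teardown work...
 *	lockmgr(&lk, LK_RELEASE);
 *
 *	(thread B)
 *	error = lockmgr(&lk, LK_EXCLUSIVE | LK_CANCELABLE);
 *	(error == ENOLCK if the request was canceled)
 */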
/*
 * Backout SCOUNT from a failed shared lock attempt and handle any race
 * to 0.  This function is also used by the release code for the less
 * optimal race to 0 case.
 *
 * WARNING! Since we are unconditionally decrementing LKC_SCOUNT, it is
 *          possible for the lock to get into a LKC_SHARED + ZERO SCOUNT
 *          situation.  A shared request can block with a ZERO SCOUNT if
 *          EXREQ or UPREQ is pending in this situation.  Be sure to always
 *          issue a wakeup() in this situation if we are unable to
 *          transition to an exclusive lock, to handle the race.
 */
static
void
undo_shreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;

        count = atomic_fetchadd_64(&lkp->lk_count, -LKC_SCOUNT) - LKC_SCOUNT;
        while ((count & (LKC_EXREQ | LKC_UPREQ | LKC_CANCEL)) &&
               (count & (LKC_SMASK | LKC_XMASK)) == 0) {
                /*
                 * Grant any UPREQ here.  This is handled in two parts.
                 * We grant the UPREQ by incrementing the excl count and
                 * clearing UPREQ and SHARED (and also CANCEL).
                 *
                 * The owner of UPREQ is still responsible for setting
                 * lk_lockholder.
                 *
                 * Note that UPREQ must have priority over EXREQ, and EXREQ
                 * over CANCEL, so if the atomic op fails we have to loop up.
                 */
                if (count & LKC_UPREQ) {
                        ncount = (count + 1) & ~(LKC_UPREQ | LKC_CANCEL |
                                                 LKC_SHARED);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                        wakeup(lkp);    /* XXX probably not needed */
                        continue;
                }
                if (count & LKC_EXREQ) {
                        ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2 |
                                                 LKC_CANCEL | LKC_SHARED);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                        wakeup(lkp);    /* XXX probably not needed */
                        continue;
                }
                if (count & LKC_CANCEL) {
                        ncount = count & ~LKC_CANCEL;
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                /* count = ncount; NOT USED */
                                break;
                        }
                }
                /* retry */
        }
}
/*
 * Undo an exclusive request.  Returns EBUSY if we were able to undo the
 * request, and 0 if the request was granted before we could undo it.
 * When 0 is returned, the lock state has not been modified.  The caller
 * is responsible for setting the lockholder to curthread.
 */
static
int
undo_exreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;
        int error;

        count = lkp->lk_count;
        error = 0;

        for (;;) {
                cpu_ccfence();

                if ((count & LKC_EXREQ) == 0) {
                        /*
                         * EXREQ was granted.  We own the exclusive lock.
                         */
                        break;
                }
                if (count & LKC_XMASK) {
                        /*
                         * Clear the EXREQ we still own.  Only wakeup on
                         * EXREQ2 if no UPREQ.  There are still exclusive
                         * holders so do not wake up any shared locks or
                         * any UPREQ.
                         *
                         * If there is an UPREQ it will issue a wakeup()
                         * for any EXREQ wait loops, so we can clear EXREQ2
                         * here.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if ((count & (LKC_EXREQ2 | LKC_UPREQ)) ==
                                    LKC_EXREQ2) {
                                        wakeup(lkp);
                                }
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                        /* retry */
                } else if (count & LKC_UPREQ) {
                        /*
                         * Clear the EXREQ we still own.  We cannot wakeup any
                         * shared or exclusive waiters because there is an
                         * upgrade request pending (that we do not handle
                         * here).
                         *
                         * If there is an UPREQ it will issue a wakeup()
                         * for any EXREQ wait loops, so we can clear EXREQ2
                         * here.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                        /* retry */
                } else if ((count & LKC_SHARED) && (count & LKC_SMASK)) {
                        /*
                         * No UPREQ, lock not held exclusively, but the lock
                         * is held shared.  Clear EXREQ, wakeup anyone trying
                         * to get the EXREQ bit (they have to set it
                         * themselves, EXREQ2 is an aggregation).
                         *
                         * We must also wakeup any shared locks blocked
                         * by the EXREQ, so just issue the wakeup
                         * unconditionally.  See lockmgr_shared() + 76 lines
                         * or so.
                         */
                        ncount = count & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                        /* retry */
                } else {
                        /*
                         * No UPREQ, lock not held exclusively or shared.
                         * Grant the EXREQ and wakeup anyone waiting on
                         * EXREQ2.
                         *
                         * We must also issue a wakeup if SHARED is set,
                         * even without an SCOUNT, due to pre-shared blocking
                         * that can occur on EXREQ in lockmgr_shared().
                         */
                        ncount = (count + 1) & ~(LKC_EXREQ | LKC_EXREQ2);
                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if (count & (LKC_EXREQ2 | LKC_SHARED))
                                        wakeup(lkp);
                                /* count = ncount; NOT USED */
                                /* we are granting, error == 0 */
                                break;
                        }
                        /* retry */
                }
                /* retry */
        }
        return error;
}
/*
 * Undo an upgrade request.  Returns EBUSY if we were able to undo the
 * request, and 0 if the request was granted before we could undo it.
 * When 0 is returned, the lock state has not been modified.  The caller
 * is responsible for setting the lockholder to curthread.
 */
static
int
undo_upreq(struct lock *lkp)
{
        uint64_t count;
        uint64_t ncount;
        int error;

        count = lkp->lk_count;
        error = 0;

        for (;;) {
                cpu_ccfence();

                if ((count & LKC_UPREQ) == 0) {
                        /*
                         * UPREQ was granted, we own the exclusive lock.
                         */
                        break;
                }
                if (count & LKC_XMASK) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        if (atomic_fcmpset_64(&lkp->lk_count, &count,
                                              count & ~LKC_UPREQ)) {
                                error = EBUSY;
                                /* count &= ~LKC_UPREQ; NOT USED */
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  Grant the exclusive
                         * request and wake it up.
                         */
                        ncount = (count + 1);
                        ncount &= ~(LKC_EXREQ | LKC_EXREQ2 | LKC_UPREQ);

                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                wakeup(lkp);
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         *
                         * We must also issue a wakeup if SHARED was set
                         * even if no shared waiters due to pre-shared blocking
                         * that can occur on UPREQ.
                         */
                        ncount = count & ~LKC_UPREQ;
                        if (count & LKC_SMASK)
                                ncount |= LKC_SHARED;

                        if (atomic_fcmpset_64(&lkp->lk_count, &count, ncount)) {
                                if ((count & LKC_SHARED) ||
                                    (ncount & LKC_SHARED)) {
                                        wakeup(lkp);
                                }
                                error = EBUSY;
                                /* count = ncount; NOT USED */
                                break;
                        }
                }
                /* retry */
        }
        return error;
}
/*
 * Transfer ownership of a held lock to the kernel (LK_KERNTHREAD) so it
 * can be released later by a different thread.
 */
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p: %p",
                    td, lp->lk_lockholder));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = NULL;
}
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}
/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        uint64_t count __unused;

        count = lkp->lk_count;
        KKASSERT((count & (LKC_EXREQ | LKC_UPREQ)) == 0 &&
                 ((count & LKC_SHARED) || (count & LKC_SMASK) == 0));
}
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        uint64_t count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & (LKC_XMASK | LKC_SMASK | LKC_EXREQ | LKC_UPREQ)) {
                if (count & LKC_XMASK) {
                        if (td == NULL || lkp->lk_lockholder == td)
                                lock_type = LK_EXCLUSIVE;
                        else
                                lock_type = LK_EXCLOTHER;
                } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
                        lock_type = LK_SHARED;
                }
        }
        return (lock_type);
}
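/*
 * Example (sketch): asserting lock state in a caller, e.g. at the top of
 * a routine that requires its lock to be held exclusively by the current
 * thread:
 *
 *	KKASSERT(lockstatus(&lk, curthread) == LK_EXCLUSIVE);
 */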
/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        uint64_t count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_XMASK)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_SMASK) != 0);
}
/*
 * Determine the number of holders of a lock.
 *
 * REMOVED - Cannot be used due to our use of atomic_fetchadd_64()
 *           for shared locks.  Caller can only test if the lock has
 *           a count or not using lockinuse(lk) (sys/lock.h)
 */
int
lockcount(struct lock *lkp)
{
        panic("lockcount cannot be used");
}

int
lockcountnb(struct lock *lkp)
{
        panic("lockcount cannot be used");
}
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        uint64_t count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_XMASK) {
                kprintf(" lock type %s: EXCLUS (count %016jx) by td %p pid %d",
                        lkp->lk_wmesg, (intmax_t)count, td,
                        p ? p->p_pid : -99);
        } else if ((count & LKC_SMASK) && (count & LKC_SHARED)) {
                kprintf(" lock type %s: SHARED (count %016jx)",
                        lkp->lk_wmesg, (intmax_t)count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if ((count & (LKC_EXREQ | LKC_UPREQ)) ||
            ((count & LKC_XMASK) && (count & LKC_SMASK)))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
#ifdef DEBUG_CANCEL_LOCKS

static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                lockmgr(&cancel_lk, LK_EXCLUSIVE);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_CANCEL_BEG);
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_OUT(req, &error, sizeof(error));
        }
        error = 0;

        return error;
}

static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
                if (error == 0)
                        lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_OUT(req, &error, sizeof(error));
                kprintf("test %d\n", error);
        }

        return 0;
}

#endif