/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/systm.h>

#include <machine/cpu.h>
CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_CANRECURSE | LK_NOSHARE));
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1
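/*
 * Note (added for clarity, not in the original sources): lockmgr keeps
 * shared and exclusive waiters on two distinct sleepqueue indexes so that
 * a release can wake up only the class of waiters that is actually allowed
 * to proceed; see wakeupshlk() and the LK_RELEASE/LK_DRAIN paths below.
 */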
#ifndef INVARIANTS
#define _lockmgr_assert(lk, what, file, line)
#define TD_LOCKS_INC(td)
#define TD_LOCKS_DEC(td)
#else
#define TD_LOCKS_INC(td)        ((td)->td_locks++)
#define TD_LOCKS_DEC(td)        ((td)->td_locks--)
#endif
#define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
#ifndef DEBUG_LOCKS
#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)
#else
#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
#endif
#define LOCK_LOG2(lk, string, arg1, arg2)                               \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                      \
                CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                      \
                CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define GIANT_DECLARE                                                   \
        int _i = 0;                                                     \
        WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do {                                            \
        if (_i > 0) {                                                   \
                while (_i--)                                            \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)
#define GIANT_SAVE() do {                                               \
        if (mtx_owned(&Giant)) {                                        \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _i++;                                           \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)
#define LK_CAN_SHARE(x)                                                 \
        (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||     \
        curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define LK_TRYOP(x)                                                     \
        ((x) & LK_NOWAIT)
#define LK_CAN_WITNESS(x)                                               \
        (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x)                                                    \
        (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
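/*
 * Note (added for clarity, not in the original sources): a "try" request is
 * any request carrying LK_NOWAIT.  For example, a flags word of
 * (LK_EXCLUSIVE | LK_NOWAIT) gives up with EBUSY instead of sleeping,
 * skips the WITNESS order check (LK_CAN_WITNESS() is false) and is recorded
 * as LOP_TRYLOCK through LK_TRYWIT().
 */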
#define lockmgr_disowned(lk)                                            \
        (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define lockmgr_xlocked(lk)                                             \
        (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
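/*
 * Note (added for clarity, not in the original sources): lk_lock holds
 * either the owning thread pointer (exclusive mode) or a sharer count
 * (LK_SHARE set), with the low bits reserved for the LK_FLAGMASK flag bits.
 * The two macros above mask off every flag except LK_SHARE before comparing
 * the word against LK_KERNPROC or curthread.
 */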
static void     assert_lockmgr(struct lock_object *lock, int how);
static void     db_show_lockmgr(struct lock_object *lock);
static void     lock_lockmgr(struct lock_object *lock, int how);
static int      unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
        .lc_name = "lockmgr",
        .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK |
            LC_UPGRADABLE,
        .lc_assert = assert_lockmgr,
        .lc_ddb_show = db_show_lockmgr,
        .lc_lock = lock_lockmgr,
        .lc_unlock = unlock_lockmgr,
};
static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
        uintptr_t x;

        x = lk->lk_lock;
        return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
/*
 * Internal sleep routine.
 * It assumes sleepq_lock held and returns with this one unheld.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
        GIANT_DECLARE;
        struct lock_class *class;
        int catch, error;

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
        catch = pri & PCATCH;
        pri &= PRIMASK;
        error = 0;

        LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
            (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

        if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
        GIANT_SAVE();
        sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
            SLEEPQ_INTERRUPTIBLE : 0), queue);
        if ((flags & LK_TIMELOCK) && timo)
                sleepq_set_timeout(&lk->lock_object, timo);

        /*
         * Decisional switch for real sleeping.
         */
        if ((flags & LK_TIMELOCK) && timo && catch)
                error = sleepq_timedwait_sig(&lk->lock_object, pri);
        else if ((flags & LK_TIMELOCK) && timo)
                error = sleepq_timedwait(&lk->lock_object, pri);
        else if (catch)
                error = sleepq_wait_sig(&lk->lock_object, pri);
        else
                sleepq_wait(&lk->lock_object, pri);
        GIANT_RESTORE();
        if ((flags & LK_SLEEPFAIL) && error == 0)
                error = ENOLCK;

        return (error);
}
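/*
 * Note (added for clarity, not in the original sources): the caller selects
 * the sleep flavour through its arguments.  For example, a priority value
 * carrying PCATCH picks the interruptible sleepq_wait_sig()/
 * sleepq_timedwait_sig() variants, LK_TIMELOCK with a non-zero timo arms a
 * timeout, and LK_SLEEPFAIL turns even an undisturbed sleep into an ENOLCK
 * failure so the caller re-evaluates the object it was trying to lock.
 */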
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
        uintptr_t v, x;
        int queue, wakeup_swapper;

        TD_LOCKS_DEC(curthread);
        TD_SLOCKS_DEC(curthread);
        WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

        wakeup_swapper = 0;
        for (;;) {
                x = lk->lk_lock;

                /*
                 * If there is more than one shared lock held, just drop one
                 * and return.
                 */
                if (LK_SHARERS(x) > 1) {
                        if (atomic_cmpset_ptr(&lk->lk_lock, x,
                            x - LK_ONE_SHARER))
                                break;
                        continue;
                }

                /*
                 * If there are no waiters on the exclusive queue, drop the
                 * lock quickly.
                 */
                if ((x & LK_ALL_WAITERS) == 0) {
                        MPASS(x == LK_SHARERS_LOCK(1));
                        if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
                            LK_UNLOCKED))
                                break;
                        continue;
                }

                /*
                 * We should have a sharer with waiters, so enter the hard
                 * path in order to handle wakeups correctly.
                 */
                sleepq_lock(&lk->lock_object);
                x = lk->lk_lock & LK_ALL_WAITERS;
                v = LK_UNLOCKED;

                /*
                 * If the lock has exclusive waiters, give them preference in
                 * order to avoid deadlock with shared runners up.
                 */
                if (x & LK_EXCLUSIVE_WAITERS) {
                        queue = SQ_EXCLUSIVE_QUEUE;
                        v |= (x & LK_SHARED_WAITERS);
                } else {
                        MPASS(x == LK_SHARED_WAITERS);
                        queue = SQ_SHARED_QUEUE;
                }

                if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
                    v)) {
                        sleepq_release(&lk->lock_object);
                        continue;
                }
                LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
                    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
                wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
                    0, queue);
                sleepq_release(&lk->lock_object);
                break;
        }

        lock_profile_release_lock(&lk->lock_object);
        return (wakeup_swapper);
}
static void
assert_lockmgr(struct lock_object *lock, int what)
{

        panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

        panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

        panic("lockmgr locks do not support sleep interlocking");
}
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
        int iflags;

        MPASS((flags & ~LK_INIT_MASK) == 0);

        iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
        if ((flags & LK_NODUP) == 0)
                iflags |= LO_DUPOK;
        if (flags & LK_NOPROFILE)
                iflags |= LO_NOPROFILE;
        if ((flags & LK_NOWITNESS) == 0)
                iflags |= LO_WITNESS;
        if (flags & LK_QUIET)
                iflags |= LO_QUIET;
        iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);

        lk->lk_lock = LK_UNLOCKED;
        lk->lk_recurse = 0;
        lk->lk_timo = timo;
        lk->lk_pri = pri;
        lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
        STACK_ZERO(lk);
}
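/*
 * Usage sketch (illustrative only, not part of this file): a consumer
 * typically pairs lockinit()/lockdestroy() around the object lifetime and
 * takes the lock through the lockmgr() wrapper from <sys/lockmgr.h>, e.g.:
 *
 *      struct lock lk;
 *
 *      lockinit(&lk, PVFS, "examplelk", 0, 0);
 *      lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *      ...
 *      lockmgr(&lk, LK_RELEASE, NULL);
 *      lockdestroy(&lk);
 *
 * The "examplelk" name and the PVFS priority are arbitrary example values.
 */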
void
lockdestroy(struct lock *lk)
{

        KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
        KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
        lock_destroy(&lk->lock_object);
}
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
        GIANT_DECLARE;
        struct lock_class *class;
        const char *iwmesg;
        uintptr_t tid, v, x;
        u_int op;
        uint64_t waittime;
        int contested, error, ipri, itimo, queue, wakeup_swapper;

        contested = 0;
        error = 0;
        waittime = 0;
        tid = (uintptr_t)curthread;
        op = (flags & LK_TYPE_MASK);
        iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
        ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
        itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

        MPASS((flags & ~LK_TOTAL_MASK) == 0);
        KASSERT((op & (op - 1)) == 0,
            ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
        KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
            (op != LK_DOWNGRADE && op != LK_RELEASE),
            ("%s: Invalid flags in regard of the operation desired @ %s:%d",
            __func__, file, line));
        KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
            ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
            __func__, file, line));

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
        if (panicstr != NULL) {
                if (flags & LK_INTERLOCK)
                        class->lc_unlock(ilk);
                return (0);
        }

        if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
                op = LK_EXCLUSIVE;

        wakeup_swapper = 0;
        switch (op) {
        case LK_SHARED:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                            file, line);
                for (;;) {
                        x = lk->lk_lock;

                        /*
                         * If no other thread has an exclusive lock, or
                         * no exclusive waiter is present, bump the count of
                         * sharers.  Since we have to preserve the state of
                         * waiters, if we fail to acquire the shared lock
                         * loop back and retry.
                         */
                        if (LK_CAN_SHARE(x)) {
                                if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
                                    x + LK_ONE_SHARER))
                                        break;
                                continue;
                        }
                        lock_profile_obtain_lock_failed(&lk->lock_object,
                            &contested, &waittime);

                        /*
                         * If the lock is already held by curthread in
                         * exclusive mode, avoid a deadlock.
                         */
                        if (LK_HOLDER(x) == tid) {
                                LOCK_LOG2(lk,
                                    "%s: %p already held in exclusive mode",
                                    __func__, lk);
                                error = EDEADLK;
                                break;
                        }

                        /*
                         * If the lock is expected to not sleep just give up
                         * and return.
                         */
                        if (LK_TRYOP(flags)) {
                                LOCK_LOG2(lk, "%s: %p fails the try operation",
                                    __func__, lk);
                                error = EBUSY;
                                break;
                        }

                        /*
                         * Acquire the sleepqueue chain lock because we
                         * probably will need to manipulate waiters flags.
                         */
                        sleepq_lock(&lk->lock_object);
                        x = lk->lk_lock;

                        /*
                         * If the lock can be acquired in shared mode, try
                         * again.
                         */
                        if (LK_CAN_SHARE(x)) {
                                sleepq_release(&lk->lock_object);
                                continue;
                        }

                        /*
                         * Try to set the LK_SHARED_WAITERS flag.  If we fail,
                         * loop back and retry.
                         */
                        if ((x & LK_SHARED_WAITERS) == 0) {
                                if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
                                    x | LK_SHARED_WAITERS)) {
                                        sleepq_release(&lk->lock_object);
                                        continue;
                                }
                                LOCK_LOG2(lk, "%s: %p set shared waiters flag",
                                    __func__, lk);
                        }

                        /*
                         * Since we have been unable to acquire the shared
                         * lock and the shared waiters flag is set, we will
                         * sleep.
                         */
                        error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                            SQ_SHARED_QUEUE);
                        flags &= ~LK_INTERLOCK;
                        if (error) {
                                LOCK_LOG3(lk,
                                    "%s: interrupted sleep for %p with %d",
                                    __func__, lk, error);
                                break;
                        }
                        LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                            __func__, lk);
                }
                if (error == 0) {
                        lock_profile_obtain_lock_success(&lk->lock_object,
                            contested, waittime, file, line);
                        LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
                            line);
                        WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
                            line);
                        TD_LOCKS_INC(curthread);
                        TD_SLOCKS_INC(curthread);
                        STACK_SAVE(lk);
                }
                break;
        case LK_UPGRADE:
                _lockmgr_assert(lk, KA_SLOCKED, file, line);
                x = lk->lk_lock & LK_ALL_WAITERS;

                /*
                 * Try to switch from one shared lock to an exclusive one.
                 * We need to preserve waiters flags during the operation.
                 */
                if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
                    tid | x)) {
                        LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
                            line);
                        WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
                        TD_SLOCKS_DEC(curthread);
                        break;
                }

                /*
                 * We have been unable to succeed in upgrading, so just
                 * give up the shared lock.
                 */
                wakeup_swapper |= wakeupshlk(lk, file, line);

                /* FALLTHROUGH */
        case LK_EXCLUSIVE:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                            LOP_EXCLUSIVE, file, line);

                /*
                 * If curthread already holds the lock and this one is
                 * allowed to recurse, simply recurse on it.
                 */
                if (lockmgr_xlocked(lk)) {
                        if ((flags & LK_CANRECURSE) == 0 &&
                            (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {

                                /*
                                 * If the lock is expected to not panic just
                                 * give up and return.
                                 */
                                if (LK_TRYOP(flags)) {
                                        LOCK_LOG2(lk,
                                            "%s: %p fails the try operation",
                                            __func__, lk);
                                        error = EBUSY;
                                        break;
                                }
                                if (flags & LK_INTERLOCK)
                                        class->lc_unlock(ilk);
                                panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
                                    __func__, iwmesg, file, line);
                        }
                        lk->lk_recurse++;
                        LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
                        LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
                            lk->lk_recurse, file, line);
                        WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
                        TD_LOCKS_INC(curthread);
                        break;
                }

                while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
                    tid)) {
                        lock_profile_obtain_lock_failed(&lk->lock_object,
                            &contested, &waittime);

                        /*
                         * If the lock is expected to not sleep just give up
                         * and return.
                         */
                        if (LK_TRYOP(flags)) {
                                LOCK_LOG2(lk, "%s: %p fails the try operation",
                                    __func__, lk);
                                error = EBUSY;
                                break;
                        }

                        /*
                         * Acquire the sleepqueue chain lock because we
                         * probably will need to manipulate waiters flags.
                         */
                        sleepq_lock(&lk->lock_object);
                        x = lk->lk_lock;
                        v = x & LK_ALL_WAITERS;

                        /*
                         * If the lock has been released while we spun on
                         * the sleepqueue chain lock just try again.
                         */
                        if (x == LK_UNLOCKED) {
                                sleepq_release(&lk->lock_object);
                                continue;
                        }

                        /*
                         * The lock can be in the state where there is a
                         * pending queue of waiters, but still no owner.
                         * This happens when the lock is contested and an
                         * owner is going to claim the lock.
                         * If curthread is the one successfully acquiring it
                         * claim lock ownership and return, preserving waiters
                         * flags.
                         */
                        if (x == (LK_UNLOCKED | v)) {
                                if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
                                    tid | v)) {
                                        sleepq_release(&lk->lock_object);
                                        LOCK_LOG2(lk,
                                            "%s: %p claimed by a new writer",
                                            __func__, lk);
                                        break;
                                }
                                sleepq_release(&lk->lock_object);
                                continue;
                        }

                        /*
                         * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
                         * fail, loop back and retry.
                         */
                        if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
                                if (!atomic_cmpset_ptr(&lk->lk_lock, x,
                                    x | LK_EXCLUSIVE_WAITERS)) {
                                        sleepq_release(&lk->lock_object);
                                        continue;
                                }
                                LOCK_LOG2(lk, "%s: %p set excl waiters flag",
                                    __func__, lk);
                        }

                        /*
                         * Since we have been unable to acquire the exclusive
                         * lock and the exclusive waiters flag is set, we will
                         * sleep.
                         */
                        error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                            SQ_EXCLUSIVE_QUEUE);
                        flags &= ~LK_INTERLOCK;
                        if (error) {
                                LOCK_LOG3(lk,
                                    "%s: interrupted sleep for %p with %d",
                                    __func__, lk, error);
                                break;
                        }
                        LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                            __func__, lk);
                }
                if (error == 0) {
                        lock_profile_obtain_lock_success(&lk->lock_object,
                            contested, waittime, file, line);
                        LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
                            lk->lk_recurse, file, line);
                        WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
                        TD_LOCKS_INC(curthread);
                        STACK_SAVE(lk);
                }
                break;
        case LK_DOWNGRADE:
                _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
                LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
                WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
                TD_SLOCKS_INC(curthread);

                /*
                 * In order to preserve waiters flags, just spin.
                 */
                for (;;) {
                        x = lk->lk_lock & LK_ALL_WAITERS;
                        if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
                            LK_SHARERS_LOCK(1) | x))
                                break;
                        cpu_spinwait();
                }
                break;
        case LK_RELEASE:
                _lockmgr_assert(lk, KA_LOCKED, file, line);
                x = lk->lk_lock;

                if ((x & LK_SHARE) == 0) {

                        /*
                         * As first option, treat the lock as if it has not
                         * any waiter.
                         * Fix up the tid var if the lock has been disowned.
                         */
                        if (LK_HOLDER(x) == LK_KERNPROC)
                                tid = LK_KERNPROC;
                        else {
                                WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
                                    file, line);
                                TD_LOCKS_DEC(curthread);
                        }
                        LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
                            lk->lk_recurse, file, line);

                        /*
                         * The lock is held in exclusive mode.
                         * If the lock is recursed also, then unrecurse it.
                         */
                        if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
                                LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
                                    lk);
                                lk->lk_recurse--;
                                break;
                        }
                        lock_profile_release_lock(&lk->lock_object);

                        if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
                            LK_UNLOCKED))
                                break;

                        sleepq_lock(&lk->lock_object);
                        x = lk->lk_lock & LK_ALL_WAITERS;
                        v = LK_UNLOCKED;

                        /*
                         * If the lock has exclusive waiters, give them
                         * preference in order to avoid deadlock with
                         * shared runners up.
                         */
                        if (x & LK_EXCLUSIVE_WAITERS) {
                                queue = SQ_EXCLUSIVE_QUEUE;
                                v |= (x & LK_SHARED_WAITERS);
                        } else {
                                MPASS(x == LK_SHARED_WAITERS);
                                queue = SQ_SHARED_QUEUE;
                        }

                        LOCK_LOG3(lk,
                            "%s: %p waking up threads on the %s queue",
                            __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
                            "exclusive");
                        atomic_store_rel_ptr(&lk->lk_lock, v);
                        wakeup_swapper = sleepq_broadcast(&lk->lock_object,
                            SLEEPQ_LK, 0, queue);
                        sleepq_release(&lk->lock_object);
                        break;
                } else
                        wakeup_swapper = wakeupshlk(lk, file, line);
                break;
        case LK_DRAIN:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                            LOP_EXCLUSIVE, file, line);

                /*
                 * Trying to drain a lock we already own will result in a
                 * deadlock.
                 */
                if (lockmgr_xlocked(lk)) {
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        panic("%s: draining %s with the lock held @ %s:%d\n",
                            __func__, iwmesg, file, line);
                }

                while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
                        lock_profile_obtain_lock_failed(&lk->lock_object,
                            &contested, &waittime);

                        /*
                         * If the lock is expected to not sleep just give up
                         * and return.
                         */
                        if (LK_TRYOP(flags)) {
                                LOCK_LOG2(lk, "%s: %p fails the try operation",
                                    __func__, lk);
                                error = EBUSY;
                                break;
                        }

                        /*
                         * Acquire the sleepqueue chain lock because we
                         * probably will need to manipulate waiters flags.
                         */
                        sleepq_lock(&lk->lock_object);
                        x = lk->lk_lock;
                        v = x & LK_ALL_WAITERS;

                        /*
                         * If the lock has been released while we spun on
                         * the sleepqueue chain lock just try again.
                         */
                        if (x == LK_UNLOCKED) {
                                sleepq_release(&lk->lock_object);
                                continue;
                        }

                        if (x == (LK_UNLOCKED | v)) {
                                v = x;
                                if (v & LK_EXCLUSIVE_WAITERS) {
                                        queue = SQ_EXCLUSIVE_QUEUE;
                                        v &= ~LK_EXCLUSIVE_WAITERS;
                                } else {
                                        MPASS(v & LK_SHARED_WAITERS);
                                        queue = SQ_SHARED_QUEUE;
                                        v &= ~LK_SHARED_WAITERS;
                                }
                                if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
                                        sleepq_release(&lk->lock_object);
                                        continue;
                                }
                                LOCK_LOG3(lk,
                                "%s: %p waking up all threads on the %s queue",
                                    __func__, lk, queue == SQ_SHARED_QUEUE ?
                                    "shared" : "exclusive");
                                wakeup_swapper |= sleepq_broadcast(
                                    &lk->lock_object, SLEEPQ_LK, 0, queue);

                                /*
                                 * If shared waiters have been woken up we need
                                 * to wait for one of them to acquire the lock
                                 * before setting the exclusive waiters flag,
                                 * in order to avoid a deadlock.
                                 */
                                if (queue == SQ_SHARED_QUEUE) {
                                        for (v = lk->lk_lock;
                                            (v & LK_SHARE) && !LK_SHARERS(v);
                                            v = lk->lk_lock)
                                                cpu_spinwait();
                                }
                        }

                        /*
                         * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
                         * fail, loop back and retry.
                         */
                        if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
                                if (!atomic_cmpset_ptr(&lk->lk_lock, x,
                                    x | LK_EXCLUSIVE_WAITERS)) {
                                        sleepq_release(&lk->lock_object);
                                        continue;
                                }
                                LOCK_LOG2(lk, "%s: %p set drain waiters flag",
                                    __func__, lk);
                        }

                        /*
                         * Since we have been unable to acquire the exclusive
                         * lock and the exclusive waiters flag is set, we will
                         * sleep.
                         */
                        if (flags & LK_INTERLOCK) {
                                class->lc_unlock(ilk);
                                flags &= ~LK_INTERLOCK;
                        }
                        GIANT_SAVE();
                        sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
                            SQ_EXCLUSIVE_QUEUE);
                        sleepq_wait(&lk->lock_object, ipri & PRIMASK);
                        GIANT_RESTORE();
                        LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                            __func__, lk);
                }

                if (error == 0) {
                        lock_profile_obtain_lock_success(&lk->lock_object,
                            contested, waittime, file, line);
                        LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
                            lk->lk_recurse, file, line);
                        WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
                        TD_LOCKS_INC(curthread);
                        STACK_SAVE(lk);
                }
                break;
        default:
                if (flags & LK_INTERLOCK)
                        class->lc_unlock(ilk);
                panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
        }

        if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
        if (wakeup_swapper)
                kick_proc0();

        return (error);
}
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
        uintptr_t tid, x;

        tid = (uintptr_t)curthread;
        _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

        /*
         * If the owner is already LK_KERNPROC just skip the whole operation.
         */
        if (LK_HOLDER(lk->lk_lock) != tid)
                return;
        LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
        WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
        TD_LOCKS_DEC(curthread);

        /*
         * In order to preserve waiters flags, just spin.
         */
        for (;;) {
                x = lk->lk_lock & LK_ALL_WAITERS;
                if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
                    LK_KERNPROC | x))
                        return;
                cpu_spinwait();
        }
}
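/*
 * Note (added for clarity, not in the original sources): after a disown the
 * lock word records LK_KERNPROC instead of a real thread, so a later
 * LK_RELEASE issued by any thread can still unlock it (see the "Fix up the
 * tid var" handling in the LK_RELEASE path above).  This is the pattern used
 * by consumers that hand a locked object off to another context.
 */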
void
lockmgr_printinfo(struct lock *lk)
{
        struct thread *td;
        uintptr_t x;

        if (lk->lk_lock == LK_UNLOCKED)
                printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
        else if (lk->lk_lock & LK_SHARE)
                printf(" lock type %s: SHARED (count %ju)\n",
                    lk->lock_object.lo_name,
                    (uintmax_t)LK_SHARERS(lk->lk_lock));
        else {
                td = lockmgr_xholder(lk);
                printf(" lock type %s: EXCL by thread %p (pid %d)\n",
                    lk->lock_object.lo_name, td, td->td_proc->p_pid);
        }

        x = lk->lk_lock;
        if (x & LK_EXCLUSIVE_WAITERS)
                printf(" with exclusive waiters pending\n");
        if (x & LK_SHARED_WAITERS)
                printf(" with shared waiters pending\n");

        STACK_PRINT(lk);
}
int
lockstatus(struct lock *lk)
{
        uintptr_t v, x;
        int ret;

        ret = LK_SHARED;
        x = lk->lk_lock;
        v = LK_HOLDER(x);

        if ((x & LK_SHARE) == 0) {
                if (v == (uintptr_t)curthread || v == LK_KERNPROC)
                        ret = LK_EXCLUSIVE;
                else
                        ret = LK_EXCLOTHER;
        } else if (x == LK_UNLOCKED)
                ret = 0;

        return (ret);
}
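/*
 * Note (added for clarity, not in the original sources): lockstatus() only
 * reports a snapshot of the lock word, so callers generally use it for
 * assertions or diagnostics, e.g. verifying that the current thread already
 * owns the lock exclusively before performing an operation that requires
 * that ownership.
 */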
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef  _lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
        int slocked = 0;

        if (panicstr != NULL)
                return;
        switch (what) {
        case KA_SLOCKED:
        case KA_SLOCKED | KA_NOTRECURSED:
        case KA_SLOCKED | KA_RECURSED:
                slocked = 1;
        case KA_LOCKED:
        case KA_LOCKED | KA_NOTRECURSED:
        case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

                /*
                 * We cannot trust WITNESS if the lock is held in exclusive
                 * mode and a call to lockmgr_disown() happened.
                 * Work around this by skipping the check if the lock is held
                 * in exclusive mode even for the KA_LOCKED case.
                 */
                if (slocked || (lk->lk_lock & LK_SHARE)) {
                        witness_assert(&lk->lock_object, what, file, line);
                        break;
                }
#endif
                if (lk->lk_lock == LK_UNLOCKED ||
                    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
                    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            lk->lock_object.lo_name, slocked ? "share" : "",
                            file, line);

                if ((lk->lk_lock & LK_SHARE) == 0) {
                        if (lockmgr_recursed(lk)) {
                                if (what & KA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            lk->lock_object.lo_name, file,
                                            line);
                        } else if (what & KA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    lk->lock_object.lo_name, file, line);
                }
                break;
        case KA_XLOCKED:
        case KA_XLOCKED | KA_NOTRECURSED:
        case KA_XLOCKED | KA_RECURSED:
                if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            lk->lock_object.lo_name, file, line);
                if (lockmgr_recursed(lk)) {
                        if (what & KA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    lk->lock_object.lo_name, file, line);
                } else if (what & KA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            lk->lock_object.lo_name, file, line);
                break;
        case KA_UNLOCKED:
                if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            lk->lock_object.lo_name, file, line);
                break;
        default:
                panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
                    line);
        }
}
#endif
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
        struct lock *lk;

        lk = td->td_wchan;

        if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
                return (0);
        db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
        if (lk->lk_lock & LK_SHARE)
                db_printf("SHARED (count %ju)\n",
                    (uintmax_t)LK_SHARERS(lk->lk_lock));
        else
                db_printf("EXCL\n");
        *ownerp = lockmgr_xholder(lk);

        return (1);
}
static void
db_show_lockmgr(struct lock_object *lock)
{
        struct thread *td;
        struct lock *lk;

        lk = (struct lock *)lock;

        db_printf(" state: ");
        if (lk->lk_lock == LK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (lk->lk_lock & LK_SHARE)
                db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
        else {
                td = lockmgr_xholder(lk);
                if (td == (struct thread *)LK_KERNPROC)
                        db_printf("XLOCK: LK_KERNPROC\n");
                else
                        db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                            td->td_tid, td->td_proc->p_pid,
                            td->td_proc->p_comm);
                if (lockmgr_recursed(lk))
                        db_printf(" recursed: %d\n", lk->lk_recurse);
        }
        db_printf(" waiters: ");
        switch (lk->lk_lock & LK_ALL_WAITERS) {
        case LK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case LK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case LK_ALL_WAITERS:
                db_printf("shared and exclusive\n");
                break;
        default:
                db_printf("none\n");
        }
}