/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Machine independent bits of reader/writer lock implementation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define RM_DESTROYED    ((void *)0xdead)

#define rm_destroyed(rm)                                                \
        (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)
#define RMPF_ONQUEUE    1
#define RMPF_SIGNAL     2

#ifndef INVARIANTS
#define _rm_assert(c, what, file, line)
#endif
static void     assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_rm(const struct lock_object *lock);
#endif
static void     lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};
struct lock_class lock_class_rm_sleepable = {
        .lc_name = "sleepable rm",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};
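
/*
 * Illustrative usage sketch, not part of this file's logic: a consumer
 * allocates one rm_priotracker per read acquisition, typically on the
 * stack.  The names below ("example_lock", the data being guarded) are
 * hypothetical.
 *
 *	struct rmlock example_lock;
 *	struct rm_priotracker tracker;
 *
 *	rm_init(&example_lock, "example");
 *
 *	rm_rlock(&example_lock, &tracker);
 *	... read shared data ...
 *	rm_runlock(&example_lock, &tracker);
 *
 *	rm_wlock(&example_lock);
 *	... modify shared data ...
 *	rm_wunlock(&example_lock);
 */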
static void
assert_rm(const struct lock_object *lock, int what)
{

        rm_assert((const struct rmlock *)lock, what);
}
static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
        struct rmlock *rm;
        struct rm_priotracker *tracker;

        rm = (struct rmlock *)lock;
        if (how == 0)
                rm_wlock(rm);
        else {
                tracker = (struct rm_priotracker *)how;
                rm_rlock(rm, tracker);
        }
}
static uintptr_t
unlock_rm(struct lock_object *lock)
{
        struct thread *td;
        struct pcpu *pc;
        struct rmlock *rm;
        struct rm_queue *queue;
        struct rm_priotracker *tracker;
        uintptr_t how;

        rm = (struct rmlock *)lock;
        tracker = NULL;
        how = 0;
        rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
        if (rm_wowned(rm))
                rm_wunlock(rm);
        else {
                /*
                 * Find the right rm_priotracker structure for curthread.
                 * The guarantee about its uniqueness is given by the fact
                 * we already asserted the lock wasn't recursively acquired.
                 */
                critical_enter();
                td = curthread;
                pc = pcpu_find(curcpu);
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        tracker = (struct rm_priotracker *)queue;
                        if ((tracker->rmp_rmlock == rm) &&
                            (tracker->rmp_thread == td)) {
                                how = (uintptr_t)tracker;
                                break;
                        }
                }
                KASSERT(tracker != NULL,
                    ("rm_priotracker is NULL when lock held in read mode"));
                critical_exit();
                rm_runlock(rm, tracker);
        }
        return (how);
}
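
/*
 * A sketch of the lc_lock/lc_unlock cookie round trip, as read from the two
 * functions above (the surrounding "sleep" framing is an assumption about
 * the callers): unlock_rm() returns 0 for a write lock and the tracker
 * pointer for a read lock, and the same cookie is later handed back to
 * lock_rm() so the lock can be reacquired in its original mode:
 *
 *	uintptr_t how;
 *
 *	how = unlock_rm(lock);		// drop read or write lock
 *	... sleep or other work ...
 *	lock_rm(lock, how);		// reacquire in the matching mode
 */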
#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
        const struct rmlock *rm;
        struct lock_class *lc;

        rm = (const struct rmlock *)lock;
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /* Update pointer to first element. */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
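
/*
 * Illustrative sketch of why the insertion order above is safe: a local-CPU
 * interrupt (e.g. rm_cleanIPI() below) only ever walks the list forward,
 *
 *	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
 *	    queue = queue->rmq_next)
 *		... examine (struct rm_priotracker *)queue ...
 *
 * so rm_tracker_add() publishes the new element's rmq_next link before
 * updating the list head, and rmq_prev may briefly be stale without
 * breaking a forward traversal.
 */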
/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
        struct rm_queue *queue;
        struct rm_priotracker *tracker;
        int count;

        count = 0;
        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
                        count++;
        }
        return (count);
}
static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}
static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;

        pc = pcpu_find(curcpu);

        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
        struct lock_class *lc;
        int liflags, xflags;

        liflags = 0;
        if (!(opts & RM_NOWITNESS))
                liflags |= LO_WITNESS;
        if (opts & RM_RECURSE)
                liflags |= LO_RECURSABLE;
        if (opts & RM_NEW)
                liflags |= LO_NEW;
        rm->rm_writecpus = all_cpus;
        LIST_INIT(&rm->rm_activeReaders);
        if (opts & RM_SLEEPABLE) {
                liflags |= LO_SLEEPABLE;
                lc = &lock_class_rm_sleepable;
                xflags = (opts & RM_NEW ? SX_NEW : 0);
                sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
                    xflags | SX_NOWITNESS);
        } else {
                lc = &lock_class_rm;
                xflags = (opts & RM_NEW ? MTX_NEW : 0);
                mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
                    xflags | MTX_NOWITNESS);
        }
        lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
void
rm_init(struct rmlock *rm, const char *name)
{

        rm_init_flags(rm, name, 0);
}
void
rm_destroy(struct rmlock *rm)
{

        rm_assert(rm, RA_UNLOCKED);
        LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_destroy(&rm->rm_lock_sx);
        else
                mtx_destroy(&rm->rm_lock_mtx);
        lock_destroy(&rm->lock_object);
}
int
rm_wowned(const struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                return (sx_xlocked(&rm->rm_lock_sx));
        else
                return (mtx_owned(&rm->rm_lock_mtx));
}
void
rm_sysinit(void *arg)
{
        struct rm_args *args = arg;

        rm_init(args->ra_rm, args->ra_desc);
}
void
rm_sysinit_flags(void *arg)
{
        struct rm_args_flags *args = arg;

        rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}
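
/*
 * Illustrative sketch: rm_sysinit()/rm_sysinit_flags() are the SYSINIT
 * callbacks behind the RM_SYSINIT()/RM_SYSINIT_FLAGS() convenience macros
 * in <sys/rmlock.h>, letting a statically allocated lock be initialized at
 * boot.  The macro signature is assumed here as (name, rm, desc), and the
 * lock name is hypothetical:
 *
 *	static struct rmlock example_rmlock;
 *	RM_SYSINIT(example_rmlock_init, &example_rmlock, "example rmlock");
 */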
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct pcpu *pc;

        critical_enter();
        pc = pcpu_find(curcpu);

        /* Check if we just need to do a proper critical_exit. */
        if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
                critical_exit();
                return (1);
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /* Check to see if the IPI granted us the lock after all. */
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return (1);
        }

        /*
         * We allow readers to acquire a lock even if a writer is blocked if
         * the lock is recursive and the reader already holds the lock.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                if (rm_trackers_present(pc, rm, curthread) != 0) {
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_unlock_spin(&rm_spinlock);
                        rm_tracker_add(pc, tracker);
                        critical_exit();
                        return (1);
                }
        }

        sched_unpin();
        critical_exit();

        if (trylock) {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        if (!sx_try_xlock(&rm->rm_lock_sx))
                                return (0);
                } else {
                        if (!mtx_trylock(&rm->rm_lock_mtx))
                                return (0);
                }
        } else {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        THREAD_SLEEPING_OK();
                        sx_xlock(&rm->rm_lock_sx);
                        THREAD_NO_SLEEPING();
                } else
                        mtx_lock(&rm->rm_lock_mtx);
        }

        critical_enter();
        pc = pcpu_find(curcpu);
        CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);

        return (1);
}
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        if (SCHEDULER_STOPPED())
                return (1);

        tracker->rmp_flags = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                THREAD_NO_SLEEPING();

        td->td_critnest++;      /* critical_enter(); */

        __compiler_membar();

        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        sched_pin();

        __compiler_membar();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if (0 == (td->td_owepreempt |
            CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
                return (1);

        /* We do not have a read token and need to acquire one. */
        return _rm_rlock_hard(rm, tracker, trylock);
}
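
/*
 * A note on the fast path above: OR-ing the two values lets the common case
 * (no pending preemption, read token already held) fall through on a single
 * test.  It is equivalent to the two-branch form:
 *
 *	if (td->td_owepreempt == 0 &&
 *	    !CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))
 *		return (1);
 */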
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        if (SCHEDULER_STOPPED())
                return;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        sched_unpin();

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                THREAD_SLEEPING_OK();

        if (0 == (td->td_owepreempt | tracker->rmp_flags))
                return;

        _rm_unlock_hard(td, tracker);
}
void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;
        cpuset_t readcpus;

        if (SCHEDULER_STOPPED())
                return;

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xlock(&rm->rm_lock_sx);
        else
                mtx_lock(&rm->rm_lock_mtx);

        if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
                /* Get all read tokens back */
                readcpus = all_cpus;
                CPU_NAND(&readcpus, &rm->rm_writecpus);
                rm->rm_writecpus = all_cpus;

                /*
                 * Assumes rm->rm_writecpus update is visible on other CPUs
                 * before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous_cpus(readcpus,
                    smp_no_rendevous_barrier,
                    rm_cleanIPI,
                    smp_no_rendevous_barrier,
                    rm);
#else
                rm_cleanIPI(rm);
#endif

                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}
void
_rm_wunlock(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);
}
#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_UNLOCKED, file, line);

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        TD_LOCKS_INC(curthread);
}
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_WLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
        TD_LOCKS_DEC(curthread);
}
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return (1);

#ifdef INVARIANTS
        if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
                critical_enter();
                KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
                    curthread) == 0,
                    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
                    rm->lock_object.lo_name, file, line));
                critical_exit();
        }
#endif
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
        if (!trylock) {
                KASSERT(!rm_wowned(rm),
                    ("rm_rlock: wlock already held for %s @ %s:%d",
                    rm->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
                    NULL);
        }

        if (_rm_rlock(rm, tracker, trylock)) {
                if (trylock)
                        LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
                            line);
                else
                        LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
                            line);
                WITNESS_LOCK(&rm->lock_object, 0, file, line);
                TD_LOCKS_INC(curthread);
                return (1);
        } else if (trylock)
                LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

        return (0);
}
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_RLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
        TD_LOCKS_DEC(curthread);
}
#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}

#endif
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
        int count;

        if (panicstr != NULL)
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
        case RA_RLOCKED | RA_RECURSED:
        case RA_RLOCKED | RA_NOTRECURSED:
                /*
                 * Handle the write-locked case.  Unlike other
                 * primitives, writers can never recurse.
                 */
                if (rm_wowned(rm)) {
                        if (what & RA_RLOCKED)
                                panic("Lock %s exclusively locked @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        break;
                }

                critical_enter();
                count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
                critical_exit();

                if (count == 0)
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rm->lock_object.lo_name, (what & RA_RLOCKED) ?
                            "read " : "", file, line);
                if (count > 1) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_WLOCKED:
                if (!rm_wowned(rm))
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
                if (rm_wowned(rm))
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);

                critical_enter();
                count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
                critical_exit();

                if (count != 0)
                        panic("Lock %s read locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        default:
                panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
        struct thread *td;

        td = tr->rmp_thread;
        db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
            td->td_proc->p_pid, td->td_name);
        if (tr->rmp_flags & RMPF_ONQUEUE) {
                db_printf("ONQUEUE");
                if (tr->rmp_flags & RMPF_SIGNAL)
                        db_printf(",SIGNAL");
        } else
                db_printf("0");
        db_printf("}\n");
}
static void
db_show_rm(const struct lock_object *lock)
{
        struct rm_priotracker *tr;
        struct rm_queue *queue;
        const struct rmlock *rm;
        struct lock_class *lc;
        struct pcpu *pc;

        rm = (const struct rmlock *)lock;
        db_printf(" writecpus: ");
        ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
        db_printf("\n");
        db_printf(" per-CPU readers:\n");
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        tr = (struct rm_priotracker *)queue;
                        if (tr->rmp_rmlock == rm)
                                print_tracker(tr);
                }
        db_printf(" active readers:\n");
        LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
                print_tracker(tr);
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        db_printf("Backing write-lock (%s):\n", lc->lc_name);
        lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif