/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * The assumption is that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))

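/*
 * Illustrative sketch (not part of the implementation): the public
 * rw_*lock() macros hand the address of the embedded rw_lock word to
 * the *_cookie functions in this file, and rwlock2rw() recovers the
 * containing lock:
 *
 *	struct rwlock lk;
 *	volatile uintptr_t *c = &lk.rw_lock;
 *	MPASS(rwlock2rw(c) == &lk);
 */
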
#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
	.lc_unlock = unlock_rw,
};

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config rw_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

static void
rw_delay_sysinit(void *dummy)
{

	rw_delay.initial = mp_ncpus * 25;
	rw_delay.step = (mp_ncpus * 25) / 2;
	rw_delay.min = mp_ncpus * 5;
	rw_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(rw_delay_sysinit);
#endif

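/*
 * Worked example (illustrative arithmetic only): on an 8-CPU machine the
 * sysinit above rescales the spin-backoff parameters to initial = 200,
 * step = 100, min = 40 and max = 2000, i.e. the delay window grows
 * linearly with mp_ncpus.
 */
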
/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return whether the write owner is recursed.  Write ownership is not
 * assured here and should be previously checked.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

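/*
 * A minimal sketch of the lock-word encoding these macros assume (the
 * authoritative definitions live in sys/rwlock.h): a read-locked word
 * has RW_LOCK_READ set and carries a reader count, while a write-locked
 * word carries the owning thread pointer plus flag bits:
 *
 *	uintptr_t v = rw->rw_lock;
 *	if (v & RW_LOCK_READ)
 *		;	// RW_READERS(v) readers hold the lock
 *	else
 *		;	// (struct thread *)RW_OWNER(v) holds it exclusively
 */
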
#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

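/*
 * A sketch of the fast path assumed above (hedged; the authoritative
 * definition is the __rw_wlock() macro in sys/rwlock.h): an uncontested
 * write lock is a single atomic_cmpset_acq_ptr() from RW_UNLOCKED to the
 * curthread pointer, and only on failure does it fall back to
 * __rw_wlock_hard() below.
 */
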
int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!rw_recursed(rw))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	__rw_wunlock(rw, curthread, file, line);
	TD_LOCKS_DEC(curthread);
}

/*
 * Determine whether a new reader can acquire the lock.  This succeeds if
 * the reader already owns a read lock and the lock is read-locked, which
 * prevents deadlock from reader recursion.  It also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing otherwise
 * gives writers priority over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)

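/*
 * Worked example (illustrative): for v == RW_UNLOCKED the second clause
 * holds, since an unlocked word is encoded as RW_LOCK_READ with zero
 * readers and no waiter bits.  Once RW_LOCK_WRITE_WAITERS or
 * RW_LOCK_WRITE_SPINNER is set, a new reader fails unless it already
 * holds a read lock (td_rw_rlocks != 0); that exception is what lets a
 * recursive reader bypass queued writers instead of deadlocking.
 */
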
void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread *)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif
		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
	curthread->td_rw_rlocks++;
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	TD_LOCKS_DEC(curthread);
	curthread->td_rw_rlocks--;
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				lock_delay(&lda);
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags on this rwlock.  If the lock was
		 * released without leaving any pending waiters queue, simply
		 * try to acquire it.  If a pending waiters queue is present,
		 * claim lock ownership and maintain the pending queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}

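/*
 * Illustrative caller-side pattern (hypothetical consumer code, not part
 * of this file): because the upgrade is non-blocking, callers usually
 * fall back to relocking and revalidating on failure:
 *
 *	rw_rlock(&foo_lock);
 *	...
 *	if (!rw_try_upgrade(&foo_lock)) {
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		// the state may have changed; revalidate before use
 *	}
 */
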
/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

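/*
 * Illustrative only (hypothetical "foo" consumer): a subsystem typically
 * guards internal helpers with rw_assert(&foo_lock, RA_WLOCKED) so that
 * INVARIANTS kernels panic on miscalled paths, while other builds compile
 * the check away through the empty __rw_assert() macro defined earlier.
 */
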
#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}
#endif