/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * 0: no warnings, 1: warnings, 2: panic
 */
static int lockmgr_from_int = 1;
SYSCTL_INT(_debug, OID_AUTO, lockmgr_from_int, CTLFLAG_RW,
	   &lockmgr_from_int, 0, "");

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef SIMPLELOCK_DEBUG
#define	COUNT(td, x) (td)->td_locks += (x)
#else
#define	COUNT(td, x)
#endif

#define	LOCK_WAIT_TIME 100
#define	LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define	LOCK_INLINE
#else
#define	LOCK_INLINE __inline
#endif

#define	LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
	int dowakeup = 0;

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			dowakeup = 1;
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
	return (dowakeup);
}

/*
 * Lock acquisition helper routine.  Called with the lock's spinlock held.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return (EBUSY);
	}

	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;

		/*
		 * Atomic spinlock release/sleep/reacquire.
		 */
		error = msleep(lkp, &lkp->lk_spinlock,
			       ((extflags & LK_PCATCH) ? PCATCH : 0),
			       lkp->lk_wmesg,
			       ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error)
			return (error);
		if (extflags & LK_SLEEPFAIL)
			return (ENOLCK);
	}
	return (0);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
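
/*
 * Usage sketch (hypothetical caller, not part of this file; the lock name
 * and the work in the critical section are illustrative only):
 *
 *	struct lock mtx;
 *
 *	lockinit(&mtx, "mtx", 0, 0);
 *	if (lockmgr(&mtx, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
 *		...modify the protected data...
 *		lockmgr(&mtx, LK_RELEASE);
 *	}
 *
 * A return value of 0 indicates success; with LK_NOWAIT the request
 * returns EBUSY instead of blocking.
 */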

#ifndef	DEBUG_LOCKS

int
lockmgr(struct lock *lkp, u_int flags)

#else

int
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)

#endif
{
	thread_t td;
	int error;
	int extflags;
	int dowakeup;
	static int didpanic;

	error = 0;
	dowakeup = 0;
	if (lockmgr_from_int && mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE && didpanic == 0) {
#ifndef DEBUG_LOCKS
		if (lockmgr_from_int == 2) {
			didpanic = 1;
			panic(
			    "lockmgr %s from %p: called from interrupt",
			    lkp->lk_wmesg, ((int **)&lkp)[-1]);
			didpanic = 0;
		} else {
			kprintf(
			    "lockmgr %s from %p: called from interrupt\n",
			    lkp->lk_wmesg, ((int **)&lkp)[-1]);
		}
#else
		if (lockmgr_from_int == 2) {
			didpanic = 1;
			panic(
			    "lockmgr %s from %s:%d: called from interrupt",
			    lkp->lk_wmesg, file, line);
			didpanic = 0;
		} else {
			kprintf(
			    "lockmgr %s from %s:%d: called from interrupt\n",
			    lkp->lk_wmesg, file, line);
		}
#endif
	}

	spin_lock_wr(&lkp->lk_spinlock);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests ( but not the exclusive
		 * lock itself ).
		 */
		if (lkp->lk_lockholder != td) {
			if (td->td_flags & TDF_DEADLKTREAT) {
				error = acquire(lkp, extflags, LK_HAVE_EXCL);
			} else {
				error = acquire(lkp, extflags,
						LK_HAVE_EXCL | LK_WANT_EXCL |
						LK_WANT_UPGRADE);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(td, 1);
			break;
		}

		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(td, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
			spin_unlock_wr(&lkp->lk_spinlock);
			panic("lockmgr: not holding exclusive lock");
		}
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOTHREAD;
		if (lkp->lk_waitcount)
			dowakeup = 1;
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * unlock/lock occur.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			dowakeup = shareunlock(lkp, 1);
			COUNT(td, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
			spin_unlock_wr(&lkp->lk_spinlock);
			panic("lockmgr: upgrade exclusive lock");
		}
		dowakeup += shareunlock(lkp, 1);
		COUNT(td, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			if (lkp->lk_exclusivecount != 0) {
				spin_unlock_wr(&lkp->lk_spinlock);
				panic("lockmgr: non-zero exclusive count");
			}
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(td, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO) {
			++dowakeup;
		}
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
				spin_unlock_wr(&lkp->lk_spinlock);
				panic("lockmgr: locking against myself");
			}
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
				      LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;

		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags,
				LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0) {
			spin_unlock_wr(&lkp->lk_spinlock);
			panic("lockmgr: non-zero exclusive count");
		}
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(td, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNTHREAD) {
				spin_unlock_wr(&lkp->lk_spinlock);
				panic("lockmgr: pid %d, not %s thr %p unlocking",
				    (td->td_proc ? td->td_proc->p_pid : -99),
				    "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNTHREAD) {
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOTHREAD;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			dowakeup += shareunlock(lkp, 1);
			COUNT(td, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			++dowakeup;
		break;

	default:
		spin_unlock_wr(&lkp->lk_spinlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	spin_unlock_wr(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
	return (error);
}

/*
 * Transfer ownership of an exclusively held lock to the kernel, so that
 * a thread other than the original holder may release it.
 */
void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		COUNT(td, -1);
		lp->lk_lockholder = LK_KERNTHREAD;
	}
}

/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;

	KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
	KKASSERT(lkp->lk_exclusivecount == 0);
	lkp->lk_flags |= LK_HAVE_EXCL;
	lkp->lk_lockholder = td;
	lkp->lk_exclusivecount = 1;
	COUNT(td, 1);
}

/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
	thread_t td = curthread;
	int dowakeup = 0;

	KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1
		 && lkp->lk_lockholder == td);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_flags &= ~LK_HAVE_EXCL;
	lkp->lk_exclusivecount = 0;
	if (lkp->lk_flags & LK_WAIT_NONZERO)
		dowakeup = 1;
	COUNT(td, -1);
	spin_unlock_wr(&lkp->lk_spinlock);
	if (dowakeup)
		wakeup(lkp);
}
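
/*
 * The two interlocked routines above are intended to be used as a pair by
 * a caller that manages lk_spinlock itself (sketch; the caller and the
 * work in between are hypothetical):
 *
 *	spin_lock_wr(&lk.lk_spinlock);
 *	lockmgr_setexclusive_interlocked(&lk);	(spinlock still held)
 *	spin_unlock_wr(&lk.lk_spinlock);
 *	...exclusive work...
 *	spin_lock_wr(&lk.lk_spinlock);
 *	lockmgr_clrexclusive_interlocked(&lk);	(releases the spinlock)
 */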

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}
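
/*
 * For example, a subsystem could set up a lock whose requests time out
 * after 5 seconds; LK_TIMELOCK is an external flag, so passing it here
 * makes every request honor lk_timo (hypothetical caller and names):
 *
 *	struct lock blk_lock;
 *
 *	lockinit(&blk_lock, "blklk", 5 * hz, LK_TIMELOCK);
 */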

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
	spin_lock_wr(&lkp->lk_spinlock);
	lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
			(flags & LK_EXTFLG_MASK);
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	spin_unlock_wr(&lkp->lk_spinlock);
}

/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
	/*
	 * At this point we should have removed all the references to this
	 * lock so there can't be anyone waiting on it.
	 */
	KKASSERT(l->lk_waitcount == 0);

	spin_uninit(&l->lk_spinlock);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	spin_lock_wr(&lkp->lk_spinlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0) {
		lock_type = LK_SHARED;
	}
	spin_unlock_wr(&lkp->lk_spinlock);
	return (lock_type);
}
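
/*
 * Callers typically use the return value in assertions, e.g.
 * (hypothetical caller):
 *
 *	KKASSERT(lockstatus(&lk, curthread) == LK_EXCLUSIVE);
 */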

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	spin_lock_wr(&lkp->lk_spinlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	spin_unlock_wr(&lkp->lk_spinlock);
	return (count);
}

int
lockcountnb(struct lock *lkp)
{
	return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
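
/*
 * Since lockcountnb() does not take the spinlock, it is suitable for
 * assertions, e.g. before destroying a lock (hypothetical caller):
 *
 *	KKASSERT(lockcountnb(&lk) == 0);
 *	lockuninit(&lk);
 */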

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (lkp->lk_sharecount)
		kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
		    p ? p->p_pid : -99);
	if (lkp->lk_waitcount > 0)
		kprintf(" with %d pending", lkp->lk_waitcount);
}