/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.27 2008/01/09 10:59:12 corecode Exp $
 */
#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
/*
 * 0: no warnings, 1: warnings, 2: panic
 */
static int lockmgr_from_int = 1;
SYSCTL_INT(_debug, OID_AUTO, lockmgr_from_int, CTLFLAG_RW, &lockmgr_from_int, 0, "");
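/*
 * The SYSCTL_INT() above exports this knob as debug.lockmgr_from_int; it can
 * be raised to 2 at run time (e.g. "sysctl debug.lockmgr_from_int=2") to turn
 * the warning emitted by lockmgr() into a panic.
 */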
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
#ifdef SIMPLELOCK_DEBUG
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)
static int acquire(struct lock *lkp, int extflags, int wanted);
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}
static LOCK_INLINE int
shareunlock(struct lock *lkp, int decr)
{
        int dowakeup = 0;

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        dowakeup = 1;
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
        return(dowakeup);
}
/*
 * lock acquisition helper routine.  Called with the lock's spinlock held.
 */
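/*
 * Returns 0 on success, EBUSY if LK_NOWAIT is set and the lock is not
 * immediately available, ENOLCK if LK_SLEEPFAIL is set and we had to sleep,
 * or the error returned by msleep() (e.g. an interrupted sleep with
 * LK_PCATCH or a timed-out sleep with LK_TIMELOCK).
 */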
static int
acquire(struct lock *lkp, int extflags, int wanted)
{
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;

                /*
                 * Atomic spinlock release/sleep/reacquire.
                 */
                error = msleep(lkp, &lkp->lk_spinlock,
                               ((extflags & LK_PCATCH) ? PCATCH : 0),
                               lkp->lk_wmesg,
                               ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL)
                        return ENOLCK;
        }
        return 0;
}
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * A spinlock is held for most of the procedure.  We must not do anything
 * fancy while holding the spinlock.
 */
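/*
 * Typical usage (illustrative sketch only; the lock variable and wait
 * message below are hypothetical, not part of this file):
 *
 *	struct lock mylock;
 *
 *	lockinit(&mylock, "mylck", 0, 0);
 *	if (lockmgr(&mylock, LK_EXCLUSIVE) == 0) {
 *		...modify the protected object...
 *		lockmgr(&mylock, LK_RELEASE);
 *	}
 *
 * LK_SHARED works the same way for readers, and LK_UPGRADE / LK_DOWNGRADE
 * convert between the two modes as described in the cases below.
 */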
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        int error;
        int extflags;
        int dowakeup;
        static int didpanic;

        error = 0;
        dowakeup = 0;
        if (lockmgr_from_int && mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE && didpanic == 0) {
#ifndef DEBUG_LOCKS
                if (lockmgr_from_int == 2) {
                        didpanic = 1;
                        panic(
                            "lockmgr %s from %p: called from interrupt",
                            lkp->lk_wmesg, ((int **)&lkp)[-1]);
                        didpanic = 0;
                } else {
                        kprintf(
                            "lockmgr %s from %p: called from interrupt\n",
                            lkp->lk_wmesg, ((int **)&lkp)[-1]);
                }
#else
                if (lockmgr_from_int == 2) {
                        didpanic = 1;
                        panic(
                            "lockmgr %s from %s:%d: called from interrupt",
                            lkp->lk_wmesg, file, line);
                        didpanic = 0;
                } else {
                        kprintf(
                            "lockmgr %s from %s:%d: called from interrupt\n",
                            lkp->lk_wmesg, file, line);
                }
#endif
        }

        spin_lock_wr(&lkp->lk_spinlock);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if TDF_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests ( but not the exclusive
                 * lock itself ).
                 */
                if (lkp->lk_lockholder != td) {
                        if (td->td_flags & TDF_DEADLKTREAT) {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL
                                        );
                        } else {
                                error = acquire(
                                            lkp,
                                            extflags,
                                            LK_HAVE_EXCL | LK_WANT_EXCL |
                                            LK_WANT_UPGRADE
                                        );
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * We hold an exclusive lock, so downgrade it to shared.
                 * An alternative would be to fail with EDEADLK.
                 */
                sharelock(lkp, 1);
                COUNT(td, 1);
                /* fall into downgrade */
        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != td || lkp->lk_exclusivecount == 0) {
                        spin_unlock_wr(&lkp->lk_spinlock);
                        panic("lockmgr: not holding exclusive lock");
                }
                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOTHREAD;
                if (lkp->lk_waitcount)
                        dowakeup = 1;
                break;
        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        dowakeup = shareunlock(lkp, 1);
                        COUNT(td, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */
        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade).  If we return an error, the file
                 * will always be unlocked.
                 */
                if ((lkp->lk_lockholder == td) || (lkp->lk_sharecount <= 0)) {
                        spin_unlock_wr(&lkp->lk_spinlock);
                        panic("lockmgr: upgrade exclusive lock");
                }
                dowakeup += shareunlock(lkp, 1);
                COUNT(td, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an upgrade,
                         * so request the upgrade and wait for the shared
                         * count to drop to zero, then take the exclusive
                         * lock.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags, LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = td;
                        if (lkp->lk_exclusivecount != 0) {
                                spin_unlock_wr(&lkp->lk_spinlock);
                                panic("lockmgr: non-zero exclusive count");
                        }
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;
#endif
                        COUNT(td, 1);
                        break;
                }
                /*
                 * Someone else has requested upgrade.  Release our shared
                 * lock, awaken the upgrade requestor if we are the last
                 * shared lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO) {
                        ++dowakeup;
                }
                /* fall into exclusive request */
        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == td && td != LK_KERNTHREAD) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) {
                                spin_unlock_wr(&lkp->lk_spinlock);
                                panic("lockmgr: locking against myself");
                        }
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(td, 1);
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Try to acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;

                /*
                 * Wait for shared locks and upgrades to finish.
                 */
                error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = td;
                if (lkp->lk_exclusivecount != 0) {
                        spin_unlock_wr(&lkp->lk_spinlock);
                        panic("lockmgr: non-zero exclusive count");
                }
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;
#endif
                COUNT(td, 1);
                break;
        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != td &&
                            lkp->lk_lockholder != LK_KERNTHREAD) {
                                spin_unlock_wr(&lkp->lk_spinlock);
                                panic("lockmgr: pid %d, not %s thr %p unlocking",
                                    (td->td_proc ? td->td_proc->p_pid : -99),
                                    "exclusive lock holder",
                                    lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNTHREAD) {
                                COUNT(td, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOTHREAD;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        dowakeup += shareunlock(lkp, 1);
                        COUNT(td, -1);
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        ++dowakeup;
                break;
        default:
                spin_unlock_wr(&lkp->lk_spinlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        spin_unlock_wr(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup(lkp);
        return (error);
}
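/*
 * Hand an exclusively held lock over to the kernel (LK_KERNTHREAD).  After
 * the transfer the lock is no longer charged to the calling thread and the
 * matching LK_RELEASE may be issued by any thread, since LK_RELEASE accepts
 * LK_KERNTHREAD as the holder.
 */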
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                COUNT(td, -1);
                lp->lk_lockholder = LK_KERNTHREAD;
        }
}
/*
 * Set the lock to be exclusively held.  The caller is holding the lock's
 * spinlock and the spinlock remains held on return.  A panic will occur
 * if the lock cannot be set to exclusive.
 */
void
lockmgr_setexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;

        KKASSERT((lkp->lk_flags & (LK_HAVE_EXCL|LK_SHARE_NONZERO)) == 0);
        KKASSERT(lkp->lk_exclusivecount == 0);
        lkp->lk_flags |= LK_HAVE_EXCL;
        lkp->lk_lockholder = td;
        lkp->lk_exclusivecount = 1;
        COUNT(td, 1);
}
/*
 * Clear the caller's exclusive lock.  The caller is holding the lock's
 * spinlock.  THIS FUNCTION WILL UNLOCK THE SPINLOCK.
 *
 * A panic will occur if the caller does not hold the lock.
 */
void
lockmgr_clrexclusive_interlocked(struct lock *lkp)
{
        thread_t td = curthread;
        int dowakeup = 0;

        KKASSERT((lkp->lk_flags & LK_HAVE_EXCL) && lkp->lk_exclusivecount == 1 &&
                 lkp->lk_lockholder == td);
        lkp->lk_lockholder = LK_NOTHREAD;
        lkp->lk_flags &= ~LK_HAVE_EXCL;
        lkp->lk_exclusivecount = 0;
        if (lkp->lk_flags & LK_WAIT_NONZERO)
                dowakeup = 1;
        COUNT(td, -1);
        spin_unlock_wr(&lkp->lk_spinlock);
        if (dowakeup)
                wakeup((void *)lkp);
}
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_init(&lkp->lk_spinlock);
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, char *wmesg, int timo, int flags)
{
        spin_lock_wr(&lkp->lk_spinlock);
        lkp->lk_flags = (lkp->lk_flags & ~LK_EXTFLG_MASK) |
                        (flags & LK_EXTFLG_MASK);
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        spin_unlock_wr(&lkp->lk_spinlock);
}
/*
 * Requires that the caller is the exclusive owner of this lock.
 */
void
lockuninit(struct lock *l)
{
        /*
         * At this point we should have removed all the references to this
         * lock so there can't be anyone waiting on it.
         */
        KKASSERT(l->lk_waitcount == 0);

        spin_uninit(&l->lk_spinlock);
}
/*
 * Determine the status of a lock.
 */
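/*
 * Returns LK_EXCLUSIVE if the lock is held exclusively by td (or by anyone
 * when td is NULL), LK_EXCLOTHER if it is held exclusively by some other
 * thread, LK_SHARED if it is held shared, or 0 if it is not held at all.
 *
 * Illustrative assertion (the lock variable is hypothetical):
 *
 *	KKASSERT(lockstatus(&mylock, curthread) == LK_EXCLUSIVE);
 */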
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;

        spin_lock_wr(&lkp->lk_spinlock);
        if (lkp->lk_exclusivecount != 0) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0) {
                lock_type = LK_SHARED;
        }
        spin_unlock_wr(&lkp->lk_spinlock);
        return (lock_type);
}
/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
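/*
 * For example (hypothetical lock variable), asserting that a lock is no
 * longer held before tearing it down:
 *
 *	KKASSERT(lockcountnb(&mylock) == 0);
 *	lockuninit(&mylock);
 */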
int
lockcount(struct lock *lkp)
{
        int count;

        spin_lock_wr(&lkp->lk_spinlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        spin_unlock_wr(&lkp->lk_spinlock);
        return (count);
}
int
lockcountnb(struct lock *lkp)
{
        return (lkp->lk_exclusivecount + lkp->lk_sharecount);
}
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (lkp->lk_sharecount)
                kprintf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                kprintf(" lock type %s: EXCL (count %d) by td %p pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, td,
                    p ? p->p_pid : -99);
        if (lkp->lk_waitcount > 0)
                kprintf(" with %d pending", lkp->lk_waitcount);
}