[dragonfly.git] / sys / kern / kern_lock.c

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 * Copyright (C) 2013-2014
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

static void undo_upreq(struct lock *lkp);

#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
            sysctl_cancel_test, "I", "test cancelable locks");

#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

/*
 * Set, change, or release a lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        thread_t otd;
        int error;
        int extflags;
        int count;
        int pflags;
        int wflags;
        int timo;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {
#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

again:
        count = lkp->lk_count;
        cpu_ccfence();

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
                                              count, count + 1)) {
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * Slow path
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                         LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_CANCELABLE) {
                                if (count & LKC_CANCEL) {
                                        error = ENOLCK;
                                        break;
                                }
                        }
                        if (extflags & LK_NOWAIT) {
                                error = EBUSY;
                                break;
                        }
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                               count | LKC_SHREQ)) {
                                goto again;
                        }

                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }
                        goto again;
                }

                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
                        COUNT(td, 1);
                        break;
                }
                goto again;

        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (count == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              LKC_EXCL | (count + 1))) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                error = ENOLCK;
                                break;
                        }
                }

                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                        goto again;
                }

                mycpu->gd_cnt.v_lock_name[0] = 'X';
                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                        lkp->lk_wmesg,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                ++mycpu->gd_cnt.v_lock_colls;

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (error)
                        break;
                if (extflags & LK_SLEEPFAIL) {
                        error = ENOLCK;
                        break;
                }
                goto again;

        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                        break;
                }
                lkp->lk_lockholder = otd;
                goto again;

        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                /* fall through into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If the lock is already exclusively owned by us, this
                 * operation is a NOP.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_EXCL)) {
                                lkp->lk_lockholder = td;
                                break;
                        }
                        goto again;
                }

                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");

                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                if (extflags & LK_CANCELABLE) {
                        if (count & LKC_CANCEL) {
                                error = ENOLCK;
                                break;
                        }
                }

                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

                /*
                 * If someone else owns UPREQ and this transition would
                 * allow it to be granted, we have to grant it.  Otherwise
                 * we release the shared lock.
                 */
                if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
                        wflags |= LKC_EXCL | LKC_UPGRANT;
                        wflags |= count;
                        wflags &= ~LKC_UPREQ;
                } else {
                        wflags |= (count - 1);
                }

                if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
                        COUNT(td, -1);

                        /*
                         * Must wakeup the thread granted the upgrade.
                         */
                        if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
                                wakeup(lkp);

                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;   /* someone else */
                        else
                                flags = LK_WAITUPGRADE; /* we own the bit */
                }
                goto again;

        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error, tsleep+LK_SLEEPFAIL),
                 * we have to undo the upgrade request and clean up any lock
                 * that might have been granted via a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);
                                break;
                        }
                        /* retry */
                } else if ((count & LKC_CANCEL) && (extflags & LK_CANCELABLE)) {
                        undo_upreq(lkp);
                        error = ENOLCK;
                        break;
                } else {
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                        tsleep_interlock(lkp, pflags);
                        if (atomic_cmpset_int(&lkp->lk_count, count, count)) {

                                mycpu->gd_cnt.v_lock_name[0] = 'U';
                                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                        lkp->lk_wmesg,
                                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                                ++mycpu->gd_cnt.v_lock_colls;

                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error) {
                                        undo_upreq(lkp);
                                        break;
                                }
                                if (extflags & LK_SLEEPFAIL) {
                                        error = ENOLCK;
                                        undo_upreq(lkp);
                                        break;
                                }
                        }
                        /* retry */
                }
                goto again;

        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *          requests pending.  We clear EXREQ unconditionally
                 *          on the 1->0 transition so it is possible for
                 *          shared requests to race the next exclusive
                 *          request.
                 *
                 * Always succeeds.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       (count - 1) &
                                                       ~(LKC_EXCL | LKC_EXREQ |
                                                         LKC_SHREQ| LKC_CANCEL))) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       (count & ~LKC_UPREQ) |
                                                       LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                wakeup(lkp);
                                /* success */
                        } else {
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                                /* success */
                        }
                        /* success */
                        if (otd != LK_KERNTHREAD)
                                COUNT(td, -1);
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       (count - 1) &
                                                       ~(LKC_EXREQ | LKC_SHREQ |
                                                         LKC_CANCEL))) {
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.  Masked count
                                 * remains 1.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       (count & ~(LKC_UPREQ |
                                                                  LKC_CANCEL)) |
                                                       LKC_EXCL | LKC_UPGRANT)) {
                                        goto again;
                                }
                                wakeup(lkp);
                        } else {
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                        }
                        /* success */
                        COUNT(td, -1);
                }
                break;

        case LK_CANCEL_BEG:
                /*
                 * Start canceling blocked requestors or later requestors.
                 * Requestors must use CANCELABLE.  Don't waste time issuing
                 * a wakeup if nobody is pending.
                 */
                KKASSERT((count & LKC_CANCEL) == 0);    /* disallowed case */
                KKASSERT((count & LKC_MASK) != 0);      /* issue w/lock held */
                if (!atomic_cmpset_int(&lkp->lk_count,
                                       count, count | LKC_CANCEL)) {
                        goto again;
                }
                if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
                        wakeup(lkp);
                }
                break;

        case LK_CANCEL_END:
                atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
                break;

        default:
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        return (error);
}
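
/*
 * Usage sketch (hypothetical, kept under #if 0 and not compiled): shows the
 * common lockmgr() calling patterns implemented above.  The example_softc
 * structure and example_* functions are illustrative only; the lockmgr()
 * flags and return values are the ones handled by this file.
 */
#if 0
struct example_softc {
        struct lock     xs_lock;        /* initialized with lockinit() */
        int             xs_state;
};

static void
example_reader(struct example_softc *xs)
{
        /* shared lock: many readers may hold it concurrently */
        lockmgr(&xs->xs_lock, LK_SHARED);
        /* ... read xs->xs_state ... */
        lockmgr(&xs->xs_lock, LK_RELEASE);
}

static int
example_writer(struct example_softc *xs)
{
        int error;

        /*
         * LK_NOWAIT fails with EBUSY instead of blocking.  LK_CANCELABLE
         * would instead fail with ENOLCK once another thread has issued
         * LK_CANCEL_BEG on the lock.
         */
        error = lockmgr(&xs->xs_lock, LK_EXCLUSIVE | LK_NOWAIT);
        if (error)
                return (error);
        xs->xs_state++;
        lockmgr(&xs->xs_lock, LK_RELEASE);
        return (0);
}

static void
example_upgrader(struct example_softc *xs)
{
        lockmgr(&xs->xs_lock, LK_SHARED);
        if (xs->xs_state == 0) {
                /*
                 * LK_UPGRADE may temporarily release the shared lock, so
                 * the protected state must be re-checked after the upgrade.
                 */
                lockmgr(&xs->xs_lock, LK_UPGRADE);
                if (xs->xs_state == 0)
                        xs->xs_state = 1;
        }
        lockmgr(&xs->xs_lock, LK_RELEASE);
}
#endif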

/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
        int count;

        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                         * another thread might own UPREQ.  Clear UPGRANT
                         * and release the granted lock.
                         */
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lockmgr(lkp, LK_RELEASE);
                                break;
                        }
                } else if (count & LKC_EXCL) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                wakeup(lkp);
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  We cannot wakeup any
                         * shared waiters because there is an exclusive
                         * request pending.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count &
                                              ~(LKC_UPREQ | LKC_SHREQ))) {
                                if (count & LKC_SHREQ)
                                        wakeup(lkp);
                                break;
                        }
                }
                /* retry */
        }
}

void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p: %p",
                    td, lp->lk_lockholder));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}
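
/*
 * Usage sketch (hypothetical, not compiled): lockmgr_kernproc() is used when
 * the thread that acquired an exclusive lock will not be the one releasing
 * it.  After the call the lock is owned by LK_KERNTHREAD and any thread may
 * issue the LK_RELEASE.
 */
#if 0
static void
example_start_io(struct lock *lkp)
{
        lockmgr(lkp, LK_EXCLUSIVE);
        lockmgr_kernproc(lkp);  /* hand ownership to LK_KERNTHREAD */
        /*
         * ... queue the request; the completion path, possibly running in
         * a different thread, does lockmgr(lkp, LK_RELEASE).
         */
}
#endif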

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
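
/*
 * Usage sketch (hypothetical, not compiled): the usual lifecycle of an
 * embedded lock.  lockinit() before first use, lockuninit() only after no
 * thread can still be blocked on it.  LK_CANRECURSE could be passed as a
 * flag to permit recursive exclusive locking.
 */
#if 0
static struct lock example_lk;

static void
example_attach(void)
{
        lockinit(&example_lk, "exmplk", 0, 0);
}

static void
example_detach(void)
{
        lockuninit(&example_lk);
}
#endif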

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}

/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}
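
/*
 * Usage sketch (hypothetical, not compiled): lockstatus() is commonly used
 * in assertions, e.g. to verify that the current thread holds a lock
 * exclusively before touching protected state.
 */
#if 0
static void
example_modify(struct lock *lkp)
{
        KKASSERT(lockstatus(lkp, curthread) == LK_EXCLUSIVE);
        /* ... modify state protected by lkp ... */
}
#endif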

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}
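
/*
 * Usage sketch (hypothetical, not compiled): as noted above, the
 * non-blocking count is suitable for assertions, e.g. checking that a lock
 * is no longer held before tearing down the containing object.
 */
#if 0
static void
example_destroy(struct lock *lkp)
{
        KKASSERT(lockcountnb(lkp) == 0);
        lockuninit(lkp);
}
#endif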

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                        lkp->lk_wmesg, count, td,
                        p ? p->p_pid : -99);
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                        lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}

void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}
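
/*
 * Usage sketch (hypothetical, not compiled): lock_sysinit() is normally
 * reached through the LOCK_SYSINIT() macro, which registers a SYSINIT that
 * initializes a statically declared lock at boot, as the DEBUG_CANCEL_LOCKS
 * block below does for cancel_lk.
 */
#if 0
static struct lock example_boot_lk;
LOCK_SYSINIT(examplelk, &example_boot_lk, "exboot", 0);
#endif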

#ifdef DEBUG_CANCEL_LOCKS

static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                SYSCTL_XUNLOCK();
                lockmgr(&cancel_lk, LK_EXCLUSIVE);
                kprintf("x");
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                lockmgr(&cancel_lk, LK_CANCEL_BEG);
                kprintf("y");
                error = tsleep(&error, PCATCH, "canmas", hz * 5);
                kprintf("z");
                lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_XLOCK();
                SYSCTL_OUT(req, &error, sizeof(error));
        }
        error = 0;

        return error;
}

static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
        int error;

        if (req->newptr) {
                error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
                if (error == 0)
                        lockmgr(&cancel_lk, LK_RELEASE);
                SYSCTL_OUT(req, &error, sizeof(error));
                kprintf("test %d\n", error);
        }

        return 0;
}

#endif