kernel - Refactor smp collision statistics
[dragonfly.git] / sys / kern / kern_lock.c
blob f95acb3315e5f841b5a933357a8d4203af02878c
1 /*
2 * Copyright (c) 1995
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (C) 1997
5 * John S. Dyson. All rights reserved.
6 * Copyright (C) 2013-2014
7 * Matthew Dillon, All rights reserved.
9 * This code contains ideas from software contributed to Berkeley by
10 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
11 * System project at Carnegie-Mellon University.
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
38 #include "opt_lint.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/proc.h>
44 #include <sys/lock.h>
45 #include <sys/sysctl.h>
46 #include <sys/spinlock.h>
47 #include <sys/thread2.h>
48 #include <sys/spinlock2.h>
49 #include <sys/indefinite2.h>
51 static void undo_upreq(struct lock *lkp);
53 #ifdef DEBUG_CANCEL_LOCKS
55 static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
56 static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);
58 static struct lock cancel_lk;
59 LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
60 SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
61 sysctl_cancel_lock, "I", "test cancelable locks");
62 SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
63 sysctl_cancel_test, "I", "test cancelable locks");
65 #endif
67 int lock_test_mode;
68 SYSCTL_INT(_debug, OID_AUTO, lock_test_mode, CTLFLAG_RW,
69 &lock_test_mode, 0, "");
72 * Locking primitives implementation.
73 * Locks provide shared/exclusive synchronization.
76 #ifdef DEBUG_LOCKS
77 #define COUNT(td, x) (td)->td_locks += (x)
78 #else
79 #define COUNT(td, x)
80 #endif
83 * Set, change, or release a lock.
85 int
86 #ifndef DEBUG_LOCKS
87 lockmgr(struct lock *lkp, u_int flags)
88 #else
89 debuglockmgr(struct lock *lkp, u_int flags,
90 const char *name, const char *file, int line)
91 #endif
93 thread_t td;
94 thread_t otd;
95 int error;
96 int extflags;
97 int count;
98 int pflags;
99 int wflags;
100 int timo;
101 int info_init;
102 #ifdef DEBUG_LOCKS
103 int i;
104 #endif
106 error = 0;
107 info_init = 0;
109 if (mycpu->gd_intr_nesting_level &&
110 (flags & LK_NOWAIT) == 0 &&
111 (flags & LK_TYPE_MASK) != LK_RELEASE &&
112 panic_cpu_gd != mycpu
115 #ifndef DEBUG_LOCKS
116 panic("lockmgr %s from %p: called from interrupt, ipi, "
117 "or hard code section",
118 lkp->lk_wmesg, ((int **)&lkp)[-1]);
119 #else
120 panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
121 "or hard code section",
122 lkp->lk_wmesg, file, line);
123 #endif
126 #ifdef DEBUG_LOCKS
127 if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
128 panic("lockmgr %s from %s:%d: called with %d spinlocks held",
129 lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
131 #endif
133 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
134 td = curthread;
136 again:
137 count = lkp->lk_count;
138 cpu_ccfence();
140 switch (flags & LK_TYPE_MASK) {
141 case LK_SHARED:
143 * Shared lock critical path case
145 if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
146 if (atomic_cmpset_int(&lkp->lk_count,
147 count, count + 1)) {
148 COUNT(td, 1);
149 break;
151 goto again;
155 * If the caller already holds the lock exclusively then
156 * we silently obtain another count on the exclusive lock.
158 * WARNING! The old FreeBSD behavior was to downgrade,
159 * but this creates a problem when recursions
160 * return to the caller and the caller expects
161 * its original exclusive lock to remain exclusively
162 * locked.
164 if (lkp->lk_lockholder == td) {
165 KKASSERT(count & LKC_EXCL);
166 if ((extflags & LK_CANRECURSE) == 0) {
167 if (extflags & LK_NOWAIT) {
168 error = EBUSY;
169 break;
171 panic("lockmgr: locking against myself");
173 atomic_add_int(&lkp->lk_count, 1);
174 COUNT(td, 1);
175 break;
179 * Slow path
181 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
182 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
183 wflags = (td->td_flags & TDF_DEADLKTREAT) ?
184 LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);
187 * Block while the lock is held exclusively or, conditionally,
188 * if other threads are trying to obtain an exclusive lock or
189 * upgrade to one.
191 if (count & wflags) {
192 if (extflags & LK_CANCELABLE) {
193 if (count & LKC_CANCEL) {
194 error = ENOLCK;
195 break;
198 if (extflags & LK_NOWAIT) {
199 error = EBUSY;
200 break;
202 tsleep_interlock(lkp, pflags);
203 if (!atomic_cmpset_int(&lkp->lk_count, count,
204 count | LKC_SHREQ)) {
205 goto again;
208 if (info_init == 0 &&
209 (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
210 indefinite_init(&td->td_indefinite,
211 lkp->lk_wmesg, 1, 'l');
212 info_init = 1;
215 error = tsleep(lkp, pflags | PINTERLOCKED,
216 lkp->lk_wmesg, timo);
217 if (error)
218 break;
219 if (extflags & LK_SLEEPFAIL) {
220 error = ENOLCK;
221 break;
223 goto again;
227 * Otherwise we can bump the count
229 if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
230 COUNT(td, 1);
231 break;
233 goto again;
235 case LK_EXCLUSIVE:
237 * Exclusive lock critical path.
239 if (count == 0) {
240 if (atomic_cmpset_int(&lkp->lk_count, count,
241 LKC_EXCL | (count + 1))) {
242 lkp->lk_lockholder = td;
243 COUNT(td, 1);
244 break;
246 goto again;
250 * Recursive lock if we already hold it exclusively.
252 if (lkp->lk_lockholder == td) {
253 KKASSERT(count & LKC_EXCL);
254 if ((extflags & LK_CANRECURSE) == 0) {
255 if (extflags & LK_NOWAIT) {
256 error = EBUSY;
257 break;
259 panic("lockmgr: locking against myself");
261 atomic_add_int(&lkp->lk_count, 1);
262 COUNT(td, 1);
263 break;
267 * We will block, handle LK_NOWAIT
269 if (extflags & LK_NOWAIT) {
270 error = EBUSY;
271 break;
273 if (extflags & LK_CANCELABLE) {
274 if (count & LKC_CANCEL) {
275 error = ENOLCK;
276 break;
281 * Wait until we can obtain the exclusive lock. EXREQ is
282 * automatically cleared when all current holders release
283 * so if we abort the operation we can safely leave it set.
284 * There might be other exclusive requesters.
286 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
287 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
289 tsleep_interlock(lkp, pflags);
290 if (!atomic_cmpset_int(&lkp->lk_count, count,
291 count | LKC_EXREQ)) {
292 goto again;
295 if (info_init == 0 &&
296 (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
297 indefinite_init(&td->td_indefinite, lkp->lk_wmesg,
298 1, 'L');
299 info_init = 1;
302 error = tsleep(lkp, pflags | PINTERLOCKED,
303 lkp->lk_wmesg, timo);
304 if (error)
305 break;
306 if (extflags & LK_SLEEPFAIL) {
307 error = ENOLCK;
308 break;
310 indefinite_check(&td->td_indefinite);
311 goto again;
313 case LK_DOWNGRADE:
315 * Downgrade an exclusive lock into a shared lock. All
316 * counts on a recursive exclusive lock become shared.
318 * This function always succeeds.
320 if (lkp->lk_lockholder != td ||
321 (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
322 panic("lockmgr: not holding exclusive lock");
325 #ifdef DEBUG_LOCKS
326 for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
327 if (td->td_lockmgr_stack[i] == lkp &&
328 td->td_lockmgr_stack_id[i] > 0
330 td->td_lockmgr_stack_id[i]--;
331 break;
334 #endif
336 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
338 otd = lkp->lk_lockholder;
339 lkp->lk_lockholder = NULL;
340 if (atomic_cmpset_int(&lkp->lk_count, count,
341 count & ~(LKC_EXCL|LKC_SHREQ))) {
342 if (count & LKC_SHREQ)
343 wakeup(lkp);
344 break;
346 lkp->lk_lockholder = otd;
347 goto again;
349 case LK_EXCLUPGRADE:
351 * Upgrade from a single shared lock to an exclusive lock.
353 * If another process is ahead of us to get an upgrade,
354 * then we want to fail rather than have an intervening
355 * exclusive access. The shared lock is released on
356 * failure.
358 if (count & LKC_UPREQ) {
359 flags = LK_RELEASE;
360 error = EBUSY;
361 goto again;
363 /* fall through into normal upgrade */
365 case LK_UPGRADE:
367 * Upgrade a shared lock to an exclusive one. This can cause
368 * the lock to be temporarily released and stolen by other
369 * threads. LK_SLEEPFAIL or LK_NOWAIT may be used to detect
370 * this case, or use LK_EXCLUPGRADE.
372 * If the lock is already exclusively owned by us, this
373 * operation is a NOP.
375 * If we return an error (even NOWAIT), the current lock will
376 * be released.
378 * Start with the critical path.
380 if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
381 if (atomic_cmpset_int(&lkp->lk_count, count,
382 count | LKC_EXCL)) {
383 lkp->lk_lockholder = td;
384 break;
386 goto again;
390 * We own a lock coming into this, so there cannot be an
391 * UPGRANT already flagged.
393 KKASSERT((count & LKC_UPGRANT) == 0);
396 * If we already hold the lock exclusively this operation
397 * succeeds and is a NOP.
399 if (count & LKC_EXCL) {
400 if (lkp->lk_lockholder == td)
401 break;
402 panic("lockmgr: upgrade unowned lock");
404 if ((count & LKC_MASK) == 0)
405 panic("lockmgr: upgrade unowned lock");
408 * We cannot upgrade without blocking at this point.
410 if (extflags & LK_NOWAIT) {
411 flags = LK_RELEASE;
412 error = EBUSY;
413 goto again;
415 if (extflags & LK_CANCELABLE) {
416 if (count & LKC_CANCEL) {
417 error = ENOLCK;
418 break;
423 * Release the shared lock and request the upgrade.
425 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
426 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
427 tsleep_interlock(lkp, pflags);
428 wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;
431 * If someone else owns UPREQ and this transition would
432 * allow it to be granted, we have to grant it. Our
433 * lock count is transferred (we effectively release).
434 * We will then request a normal exclusive lock.
436 * Otherwise we release the shared lock and either do
437 * an UPREQ or an EXREQ. The count is always > 1 in
438 * this case since we handle all other count == 1
439 * situations here and above.
441 if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
442 wflags |= LKC_EXCL | LKC_UPGRANT;
443 wflags |= count;
444 wflags &= ~LKC_UPREQ; /* was set from count */
445 } else {
446 wflags |= (count - 1);
449 if (info_init == 0 &&
450 (lkp->lk_flags & LK_NOCOLLSTATS) == 0) {
451 indefinite_init(&td->td_indefinite, lkp->lk_wmesg,
452 1, 'U');
453 info_init = 1;
456 if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
457 COUNT(td, -1);
460 * Must wakeup the thread granted the upgrade.
462 if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
463 wakeup(lkp);
465 error = tsleep(lkp, pflags | PINTERLOCKED,
466 lkp->lk_wmesg, timo);
467 if (error) {
468 if ((count & LKC_UPREQ) == 0)
469 undo_upreq(lkp);
470 break;
472 if (extflags & LK_SLEEPFAIL) {
473 if ((count & LKC_UPREQ) == 0)
474 undo_upreq(lkp);
475 error = ENOLCK;
476 break;
480 * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
481 * depending on whether we were able to acquire the
482 * LKC_UPREQ bit.
484 if (count & LKC_UPREQ)
485 flags = LK_EXCLUSIVE; /* someone else */
486 else
487 flags = LK_WAITUPGRADE; /* we own the bit */
489 indefinite_check(&td->td_indefinite);
490 goto again;
492 case LK_WAITUPGRADE:
494 * We own the LKC_UPREQ bit, wait until we are granted the
495 * exclusive lock (LKC_UPGRANT is set).
497 * IF THE OPERATION FAILS (tsleep error, or tsleep+LK_SLEEPFAIL),
498 * we have to undo the upgrade request and clean up any lock
499 * that might have been granted via a race.
501 if (count & LKC_UPGRANT) {
502 if (atomic_cmpset_int(&lkp->lk_count, count,
503 count & ~LKC_UPGRANT)) {
504 lkp->lk_lockholder = td;
505 KKASSERT(count & LKC_EXCL);
506 break;
508 /* retry */
509 } else if ((count & LKC_CANCEL) && (extflags & LK_CANCELABLE)) {
510 undo_upreq(lkp);
511 error = ENOLCK;
512 break;
513 } else {
514 pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
515 timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
516 tsleep_interlock(lkp, pflags);
517 if (atomic_fetchadd_int(&lkp->lk_count, 0) == count) {
518 error = tsleep(lkp, pflags | PINTERLOCKED,
519 lkp->lk_wmesg, timo);
520 if (error) {
521 undo_upreq(lkp);
522 break;
524 if (extflags & LK_SLEEPFAIL) {
525 error = ENOLCK;
526 undo_upreq(lkp);
527 break;
530 /* retry */
532 indefinite_check(&td->td_indefinite);
533 goto again;
535 case LK_RELEASE:
537 * Release the currently held lock. If releasing the current
538 * lock as part of an error return, error will ALREADY be
539 * non-zero.
541 * When releasing the last lock we automatically transition
542 * LKC_UPREQ to LKC_EXCL|1.
544 * WARNING! We cannot detect when there are multiple exclusive
545 * requests pending. We clear EXREQ unconditionally
546 * on the 1->0 transition so it is possible for
547 * shared requests to race the next exclusive
548 * request.
550 * WARNING! lksleep() assumes that LK_RELEASE does not
551 * block.
553 * Always succeeds.
555 if ((count & LKC_MASK) == 0)
556 panic("lockmgr: LK_RELEASE: no lock held");
558 if (count & LKC_EXCL) {
559 if (lkp->lk_lockholder != LK_KERNTHREAD &&
560 lkp->lk_lockholder != td) {
561 panic("lockmgr: pid %d, not exlusive "
562 "lock holder thr %p/%p unlocking",
563 (td->td_proc ? td->td_proc->p_pid : -1),
564 td, lkp->lk_lockholder);
566 if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
568 * Last exclusive count is being released
570 otd = lkp->lk_lockholder;
571 lkp->lk_lockholder = NULL;
572 if (!atomic_cmpset_int(&lkp->lk_count, count,
573 (count - 1) &
574 ~(LKC_EXCL | LKC_EXREQ |
575 LKC_SHREQ | LKC_CANCEL))) {
576 lkp->lk_lockholder = otd;
577 goto again;
579 if (count & (LKC_EXREQ|LKC_SHREQ))
580 wakeup(lkp);
581 /* success */
582 } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
583 (LKC_UPREQ | 1)) {
585 * Last exclusive count is being released but
586 * an upgrade request is present, automatically
587 * grant an exclusive state to the owner of
588 * the upgrade request.
590 otd = lkp->lk_lockholder;
591 lkp->lk_lockholder = NULL;
592 if (!atomic_cmpset_int(&lkp->lk_count, count,
593 (count & ~LKC_UPREQ) |
594 LKC_UPGRANT)) {
595 lkp->lk_lockholder = otd;
596 goto again;
598 wakeup(lkp);
599 /* success */
600 } else {
601 otd = lkp->lk_lockholder;
602 if (!atomic_cmpset_int(&lkp->lk_count, count,
603 count - 1)) {
604 goto again;
606 /* success */
608 /* success */
609 if (otd != LK_KERNTHREAD)
610 COUNT(td, -1);
611 } else {
612 if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
614 * Last shared count is being released,
615 * no upgrade request present.
617 if (!atomic_cmpset_int(&lkp->lk_count, count,
618 (count - 1) &
619 ~(LKC_EXREQ | LKC_SHREQ |
620 LKC_CANCEL))) {
621 goto again;
623 if (count & (LKC_EXREQ|LKC_SHREQ))
624 wakeup(lkp);
625 /* success */
626 } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
627 (LKC_UPREQ | 1)) {
629 * Last shared count is being released but
630 * an upgrade request is present, automatically
631 * grant an exclusive state to the owner of
632 * the upgrade request. Masked count
633 * remains 1.
635 if (!atomic_cmpset_int(&lkp->lk_count, count,
636 (count & ~(LKC_UPREQ |
637 LKC_CANCEL)) |
638 LKC_EXCL | LKC_UPGRANT)) {
639 goto again;
641 wakeup(lkp);
642 } else {
644 * Shared count is greater than 1, just
645 * decrement it by one.
647 if (!atomic_cmpset_int(&lkp->lk_count, count,
648 count - 1)) {
649 goto again;
652 /* success */
653 COUNT(td, -1);
655 break;
657 case LK_CANCEL_BEG:
659 * Start canceling blocked requestors or later requestors.
660 * Requestors must use CANCELABLE. Don't waste time issuing
661 * a wakeup if nobody is pending.
663 KKASSERT((count & LKC_CANCEL) == 0); /* disallowed case */
664 KKASSERT((count & LKC_MASK) != 0); /* issue w/lock held */
665 if (!atomic_cmpset_int(&lkp->lk_count,
666 count, count | LKC_CANCEL)) {
667 goto again;
669 if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
670 wakeup(lkp);
672 break;
674 case LK_CANCEL_END:
675 atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
676 break;
678 default:
679 panic("lockmgr: unknown locktype request %d",
680 flags & LK_TYPE_MASK);
681 /* NOTREACHED */
684 if (info_init)
685 indefinite_done(&td->td_indefinite);
687 return (error);
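/*
 * Editorial illustration, not part of kern_lock.c: a sketch of how a
 * hypothetical caller might use the upgrade/downgrade paths handled by
 * lockmgr() above.  The "demo_" names are assumptions for the example;
 * the calls themselves (lockmgr, LK_SHARED, LK_EXCLUPGRADE, LK_DOWNGRADE,
 * LK_RELEASE) are the API implemented in this file.
 */
static void
demo_upgrade_pattern(struct lock *lkp)
{
	lockmgr(lkp, LK_SHARED);

	/*
	 * LK_EXCLUPGRADE fails (EBUSY) and drops the shared count if
	 * another upgrade request is already pending; any error return
	 * from the upgrade path leaves the lock released.
	 */
	if (lockmgr(lkp, LK_EXCLUPGRADE) == 0) {
		/* ... exclusive work ... */
		lockmgr(lkp, LK_DOWNGRADE);	/* always succeeds */
		/* ... shared work ... */
		lockmgr(lkp, LK_RELEASE);
	}
	/* on failure the shared lock is already gone; retry if desired */
}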
691 * Undo an upgrade request
693 static
694 void
695 undo_upreq(struct lock *lkp)
697 int count;
699 for (;;) {
700 count = lkp->lk_count;
701 cpu_ccfence();
702 if (count & LKC_UPGRANT) {
704 * UPREQ was shifted to UPGRANT. We own UPGRANT now;
705 * another thread might own UPREQ. Clear UPGRANT
706 * and release the granted lock.
708 if (atomic_cmpset_int(&lkp->lk_count, count,
709 count & ~LKC_UPGRANT)) {
710 lkp->lk_lockholder = curthread;
711 lockmgr(lkp, LK_RELEASE);
712 break;
714 } else if (count & LKC_EXCL) {
716 * Clear the UPREQ we still own. Nobody to wakeup
717 * here because there is an existing exclusive
718 * holder.
720 KKASSERT(count & LKC_UPREQ);
721 KKASSERT((count & LKC_MASK) > 0);
722 if (atomic_cmpset_int(&lkp->lk_count, count,
723 count & ~LKC_UPREQ)) {
724 wakeup(lkp);
725 break;
727 } else if (count & LKC_EXREQ) {
729 * Clear the UPREQ we still own. We cannot wakeup any
730 * shared waiters because there is an exclusive
731 * request pending.
733 KKASSERT(count & LKC_UPREQ);
734 KKASSERT((count & LKC_MASK) > 0);
735 if (atomic_cmpset_int(&lkp->lk_count, count,
736 count & ~LKC_UPREQ)) {
737 break;
739 } else {
741 * Clear the UPREQ we still own. Wakeup any shared
742 * waiters.
744 KKASSERT(count & LKC_UPREQ);
745 KKASSERT((count & LKC_MASK) > 0);
746 if (atomic_cmpset_int(&lkp->lk_count, count,
747 count &
748 ~(LKC_UPREQ | LKC_SHREQ))) {
749 if (count & LKC_SHREQ)
750 wakeup(lkp);
751 break;
754 /* retry */
758 void
759 lockmgr_kernproc(struct lock *lp)
761 struct thread *td __debugvar = curthread;
763 if (lp->lk_lockholder != LK_KERNTHREAD) {
764 KASSERT(lp->lk_lockholder == td,
765 ("lockmgr_kernproc: lock not owned by curthread %p: %p",
766 td, lp->lk_lockholder));
767 lp->lk_lockholder = LK_KERNTHREAD;
768 COUNT(td, -1);
773 * Initialize a lock; required before use.
775 void
776 lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
778 lkp->lk_flags = (flags & LK_EXTFLG_MASK);
779 lkp->lk_count = 0;
780 lkp->lk_wmesg = wmesg;
781 lkp->lk_timo = timo;
782 lkp->lk_lockholder = LK_NOTHREAD;
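/*
 * Editorial illustration, not part of kern_lock.c: the typical lifecycle
 * of a lock set up with lockinit() above.  "demo_lk" and
 * "demo_basic_usage" are hypothetical names; lockinit, lockmgr and
 * lockuninit are the API defined in this file.
 */
static struct lock demo_lk;

static void
demo_basic_usage(void)
{
	lockinit(&demo_lk, "demolk", 0, 0);

	lockmgr(&demo_lk, LK_EXCLUSIVE);	/* blocking exclusive acquire */
	/* ... modify the protected state ... */
	lockmgr(&demo_lk, LK_RELEASE);

	/* non-blocking shared attempt; EBUSY if held exclusively */
	if (lockmgr(&demo_lk, LK_SHARED | LK_NOWAIT) == 0) {
		/* ... read the protected state ... */
		lockmgr(&demo_lk, LK_RELEASE);
	}

	lockuninit(&demo_lk);
}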
786 * Reinitialize a lock that is being reused for a different purpose, but
787 * which may have pending (blocked) threads sitting on it. The caller
788 * must already hold the interlock.
790 void
791 lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
793 lkp->lk_wmesg = wmesg;
794 lkp->lk_timo = timo;
798 * De-initialize a lock. The structure must no longer be used by anyone.
800 void
801 lockuninit(struct lock *lkp)
803 KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
807 * Determine the status of a lock.
810 lockstatus(struct lock *lkp, struct thread *td)
812 int lock_type = 0;
813 int count;
815 count = lkp->lk_count;
816 cpu_ccfence();
818 if (count & LKC_EXCL) {
819 if (td == NULL || lkp->lk_lockholder == td)
820 lock_type = LK_EXCLUSIVE;
821 else
822 lock_type = LK_EXCLOTHER;
823 } else if (count & LKC_MASK) {
824 lock_type = LK_SHARED;
826 return (lock_type);
830 * Return non-zero if the caller owns the lock shared or exclusive.
831 * We can only guess re: shared locks.
834 lockowned(struct lock *lkp)
836 thread_t td = curthread;
837 int count;
839 count = lkp->lk_count;
840 cpu_ccfence();
842 if (count & LKC_EXCL)
843 return(lkp->lk_lockholder == td);
844 else
845 return((count & LKC_MASK) != 0);
849 * Determine the number of holders of a lock.
851 * The non-blocking version can usually be used for assertions.
854 lockcount(struct lock *lkp)
856 return(lkp->lk_count & LKC_MASK);
860 lockcountnb(struct lock *lkp)
862 return(lkp->lk_count & LKC_MASK);
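/*
 * Editorial illustration, not part of kern_lock.c: as the comment above
 * notes, the non-blocking queries are mostly useful in assertions.
 * "demo_assert_exclusive" is a hypothetical name; KKASSERT, curthread,
 * lockstatus, lockowned and lockcountnb come from this file and the
 * headers it already includes.
 */
static void
demo_assert_exclusive(struct lock *lkp)
{
	KKASSERT(lockcountnb(lkp) > 0);
	KKASSERT(lockowned(lkp));
	KKASSERT(lockstatus(lkp, curthread) == LK_EXCLUSIVE);
}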
866 * Print out information about state of a lock. Used by VOP_PRINT
867 * routines to display status about contained locks.
869 void
870 lockmgr_printinfo(struct lock *lkp)
872 struct thread *td = lkp->lk_lockholder;
873 struct proc *p;
874 int count;
876 count = lkp->lk_count;
877 cpu_ccfence();
879 if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
880 p = td->td_proc;
881 else
882 p = NULL;
884 if (count & LKC_EXCL) {
885 kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
886 lkp->lk_wmesg, count, td,
887 p ? p->p_pid : -99);
888 } else if (count & LKC_MASK) {
889 kprintf(" lock type %s: SHARED (count %08x)",
890 lkp->lk_wmesg, count);
891 } else {
892 kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
894 if (count & (LKC_EXREQ|LKC_SHREQ))
895 kprintf(" with waiters\n");
896 else
897 kprintf("\n");
900 void
901 lock_sysinit(struct lock_args *arg)
903 lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
906 #ifdef DEBUG_CANCEL_LOCKS
908 static
910 sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
912 int error;
914 if (req->newptr) {
915 SYSCTL_XUNLOCK();
916 lockmgr(&cancel_lk, LK_EXCLUSIVE);
917 error = tsleep(&error, PCATCH, "canmas", hz * 5);
918 lockmgr(&cancel_lk, LK_CANCEL_BEG);
919 error = tsleep(&error, PCATCH, "canmas", hz * 5);
920 lockmgr(&cancel_lk, LK_RELEASE);
921 SYSCTL_XLOCK();
922 SYSCTL_OUT(req, &error, sizeof(error));
924 error = 0;
926 return error;
929 static
931 sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
933 int error;
935 if (req->newptr) {
936 error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
937 if (error == 0)
938 lockmgr(&cancel_lk, LK_RELEASE);
939 SYSCTL_OUT(req, &error, sizeof(error));
940 kprintf("test %d\n", error);
943 return 0;
946 #endif
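/*
 * Editorial illustration, not part of kern_lock.c: the cancellation
 * protocol exercised by the DEBUG_CANCEL_LOCKS sysctls above.  A holder
 * brackets its teardown with LK_CANCEL_BEG/LK_CANCEL_END so that
 * requestors which passed LK_CANCELABLE fail with ENOLCK instead of
 * blocking.  The "demo_" names are assumptions for the example.
 */
static void
demo_cancel_owner(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);
	lockmgr(lkp, LK_CANCEL_BEG);	/* must be issued with the lock held */
	/* ... teardown; cancelable requestors now get ENOLCK ... */
	lockmgr(lkp, LK_CANCEL_END);
	lockmgr(lkp, LK_RELEASE);
}

static int
demo_cancel_requestor(struct lock *lkp)
{
	int error;

	error = lockmgr(lkp, LK_EXCLUSIVE | LK_CANCELABLE);
	if (error == 0)
		lockmgr(lkp, LK_RELEASE);
	return (error);			/* ENOLCK if canceled */
}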