sys/kern/kern_lock.c
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 * Copyright (C) 2013-2014
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

static void undo_upreq(struct lock *lkp);

#ifdef DEBUG_CANCEL_LOCKS

static int sysctl_cancel_lock(SYSCTL_HANDLER_ARGS);
static int sysctl_cancel_test(SYSCTL_HANDLER_ARGS);

static struct lock cancel_lk;
LOCK_SYSINIT(cancellk, &cancel_lk, "cancel", 0);
SYSCTL_PROC(_kern, OID_AUTO, cancel_lock, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
	    sysctl_cancel_lock, "I", "test cancelable locks");
SYSCTL_PROC(_kern, OID_AUTO, cancel_test, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
	    sysctl_cancel_test, "I", "test cancelable locks");

#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

/*
 * Set, change, or release a lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
	     const char *name, const char *file, int line)
#endif
{
	thread_t td;
	thread_t otd;
	int error;
	int extflags;
	int count;
	int pflags;
	int wflags;
	int timo;
#ifdef DEBUG_LOCKS
	int i;
#endif

	error = 0;

	if (mycpu->gd_intr_nesting_level &&
	    (flags & LK_NOWAIT) == 0 &&
	    (flags & LK_TYPE_MASK) != LK_RELEASE &&
	    panic_cpu_gd != mycpu
	) {

#ifndef DEBUG_LOCKS
		panic("lockmgr %s from %p: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
		panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
		      "or hard code section",
		      lkp->lk_wmesg, file, line);
#endif
	}

#ifdef DEBUG_LOCKS
	if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
		panic("lockmgr %s from %s:%d: called with %d spinlocks held",
		      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
	}
#endif

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
	td = curthread;

again:
	count = lkp->lk_count;
	cpu_ccfence();

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		/*
		 * Shared lock critical path case
		 */
		if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
			if (atomic_cmpset_int(&lkp->lk_count,
					      count, count + 1)) {
				COUNT(td, 1);
				break;
			}
			goto again;
		}

		/*
		 * If the caller already holds the lock exclusively then
		 * we silently obtain another count on the exclusive lock.
		 *
		 * WARNING!  The old FreeBSD behavior was to downgrade,
		 *	     but this creates a problem when recursions
		 *	     return to the caller and the caller expects
		 *	     its original exclusive lock to remain exclusively
		 *	     locked.
		 */
		if (lkp->lk_lockholder == td) {
			KKASSERT(count & LKC_EXCL);
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_NOWAIT) {
					error = EBUSY;
					break;
				}
				panic("lockmgr: locking against myself");
			}
			atomic_add_int(&lkp->lk_count, 1);
			COUNT(td, 1);
			break;
		}

		/*
		 * Slow path
		 */
		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
		wflags = (td->td_flags & TDF_DEADLKTREAT) ?
				LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

		/*
		 * Block while the lock is held exclusively or, conditionally,
		 * if other threads are trying to obtain an exclusive lock or
		 * upgrade to one.
		 */
		if (count & wflags) {
			if (extflags & LK_CANCELABLE) {
				if (count & LKC_CANCEL) {
					error = ENOLCK;
					break;
				}
			}
			if (extflags & LK_NOWAIT) {
				error = EBUSY;
				break;
			}
			tsleep_interlock(lkp, pflags);
			if (!atomic_cmpset_int(&lkp->lk_count, count,
					       count | LKC_SHREQ)) {
				goto again;
			}

			mycpu->gd_cnt.v_lock_name[0] = 'S';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				lkp->lk_wmesg,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;

			error = tsleep(lkp, pflags | PINTERLOCKED,
				       lkp->lk_wmesg, timo);
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
			goto again;
		}

		/*
		 * Otherwise we can bump the count
		 */
		if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
			COUNT(td, 1);
			break;
		}
		goto again;

	case LK_EXCLUSIVE:
		/*
		 * Exclusive lock critical path.
		 */
		if (count == 0) {
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      LKC_EXCL | (count + 1))) {
				lkp->lk_lockholder = td;
				COUNT(td, 1);
				break;
			}
			goto again;
		}

		/*
		 * Recursive lock if we already hold it exclusively.
		 */
		if (lkp->lk_lockholder == td) {
			KKASSERT(count & LKC_EXCL);
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_NOWAIT) {
					error = EBUSY;
					break;
				}
				panic("lockmgr: locking against myself");
			}
			atomic_add_int(&lkp->lk_count, 1);
			COUNT(td, 1);
			break;
		}

		/*
		 * We will block, handle LK_NOWAIT
		 */
		if (extflags & LK_NOWAIT) {
			error = EBUSY;
			break;
		}
		if (extflags & LK_CANCELABLE) {
			if (count & LKC_CANCEL) {
				error = ENOLCK;
				break;
			}
		}

		/*
		 * Wait until we can obtain the exclusive lock.  EXREQ is
		 * automatically cleared when all current holders release
		 * so if we abort the operation we can safely leave it set.
		 * There might be other exclusive requesters.
		 */
		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

		tsleep_interlock(lkp, pflags);
		if (!atomic_cmpset_int(&lkp->lk_count, count,
				       count | LKC_EXREQ)) {
			goto again;
		}

		mycpu->gd_cnt.v_lock_name[0] = 'X';
		strncpy(mycpu->gd_cnt.v_lock_name + 1,
			lkp->lk_wmesg,
			sizeof(mycpu->gd_cnt.v_lock_name) - 2);
		++mycpu->gd_cnt.v_lock_colls;

		error = tsleep(lkp, pflags | PINTERLOCKED,
			       lkp->lk_wmesg, timo);
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		goto again;

	case LK_DOWNGRADE:
		/*
		 * Downgrade an exclusive lock into a shared lock.  All
		 * counts on a recursive exclusive lock become shared.
		 *
		 * This function always succeeds.
		 */
		if (lkp->lk_lockholder != td ||
		    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
			panic("lockmgr: not holding exclusive lock");
		}

#ifdef DEBUG_LOCKS
		for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
			if (td->td_lockmgr_stack[i] == lkp &&
			    td->td_lockmgr_stack_id[i] > 0
			) {
				td->td_lockmgr_stack_id[i]--;
				break;
			}
		}
#endif
		/*
		 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
		 */
		otd = lkp->lk_lockholder;
		lkp->lk_lockholder = NULL;
		if (atomic_cmpset_int(&lkp->lk_count, count,
				      count & ~(LKC_EXCL|LKC_SHREQ))) {
			if (count & LKC_SHREQ)
				wakeup(lkp);
			break;
		}
		lkp->lk_lockholder = otd;
		goto again;

	case LK_EXCLUPGRADE:
		/*
		 * Upgrade from a single shared lock to an exclusive lock.
		 *
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.  The shared lock is released on
		 * failure.
		 */
		if (count & LKC_UPREQ) {
			flags = LK_RELEASE;
			error = EBUSY;
			goto again;
		}
		/* fall through into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  This can cause
		 * the lock to be temporarily released and stolen by other
		 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
		 * this case, or use LK_EXCLUPGRADE.
		 *
		 * If the lock is already exclusively owned by us, this
		 * operation is a NOP.
		 *
		 * If we return an error (even NOWAIT), the current lock will
		 * be released.
		 *
		 * Start with the critical path.
		 */
		if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count | LKC_EXCL)) {
				lkp->lk_lockholder = td;
				break;
			}
			goto again;
		}

		/*
		 * If we already hold the lock exclusively this operation
		 * succeeds and is a NOP.
		 */
		if (count & LKC_EXCL) {
			if (lkp->lk_lockholder == td)
				break;
			panic("lockmgr: upgrade unowned lock");
		}
		if ((count & LKC_MASK) == 0)
			panic("lockmgr: upgrade unowned lock");

		/*
		 * We cannot upgrade without blocking at this point.
		 */
		if (extflags & LK_NOWAIT) {
			flags = LK_RELEASE;
			error = EBUSY;
			goto again;
		}
		if (extflags & LK_CANCELABLE) {
			if (count & LKC_CANCEL) {
				error = ENOLCK;
				break;
			}
		}

		/*
		 * Release the shared lock and request the upgrade.
		 */
		pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
		timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
		tsleep_interlock(lkp, pflags);
		wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

		/*
		 * If someone else owns UPREQ and this transition would
		 * allow it to be granted, we have to grant it.  Otherwise
		 * we release the shared lock.
		 */
		if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
			wflags |= LKC_EXCL | LKC_UPGRANT;
			wflags |= count;
			wflags &= ~LKC_UPREQ;
		} else {
			wflags |= (count - 1);
		}

		if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
			COUNT(td, -1);

			/*
			 * Must wakeup the thread granted the upgrade.
			 */
			if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
				wakeup(lkp);

			mycpu->gd_cnt.v_lock_name[0] = 'U';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				lkp->lk_wmesg,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;

			error = tsleep(lkp, pflags | PINTERLOCKED,
				       lkp->lk_wmesg, timo);
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}

			/*
			 * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
			 * depending on whether we were able to acquire the
			 * LKC_UPREQ bit.
			 */
			if (count & LKC_UPREQ)
				flags = LK_EXCLUSIVE;	/* someone else */
			else
				flags = LK_WAITUPGRADE;	/* we own the bit */
		}
		goto again;

	case LK_WAITUPGRADE:
		/*
		 * We own the LKC_UPREQ bit, wait until we are granted the
		 * exclusive lock (LKC_UPGRANT is set).
		 *
		 * IF THE OPERATION FAILS (tsleep error, or tsleep+LK_SLEEPFAIL),
		 * we have to undo the upgrade request and clean up any lock
		 * that might have been granted via a race.
		 */
		if (count & LKC_UPGRANT) {
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPGRANT)) {
				lkp->lk_lockholder = td;
				KKASSERT(count & LKC_EXCL);
				break;
			}
			/* retry */
		} else if ((count & LKC_CANCEL) && (extflags & LK_CANCELABLE)) {
			undo_upreq(lkp);
			error = ENOLCK;
			break;
		} else {
			pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
			timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
			tsleep_interlock(lkp, pflags);
			if (atomic_cmpset_int(&lkp->lk_count, count, count)) {

				mycpu->gd_cnt.v_lock_name[0] = 'U';
				strncpy(mycpu->gd_cnt.v_lock_name + 1,
					lkp->lk_wmesg,
					sizeof(mycpu->gd_cnt.v_lock_name) - 2);
				++mycpu->gd_cnt.v_lock_colls;

				error = tsleep(lkp, pflags | PINTERLOCKED,
					       lkp->lk_wmesg, timo);
				if (error) {
					undo_upreq(lkp);
					break;
				}
				if (extflags & LK_SLEEPFAIL) {
					error = ENOLCK;
					undo_upreq(lkp);
					break;
				}
			}
			/* retry */
		}
		goto again;

	case LK_RELEASE:
		/*
		 * Release the currently held lock.  If releasing the current
		 * lock as part of an error return, error will ALREADY be
		 * non-zero.
		 *
		 * When releasing the last lock we automatically transition
		 * LKC_UPREQ to LKC_EXCL|1.
		 *
		 * WARNING! We cannot detect when there are multiple exclusive
		 *	    requests pending.  We clear EXREQ unconditionally
		 *	    on the 1->0 transition so it is possible for
		 *	    shared requests to race the next exclusive
		 *	    request.
		 *
		 * Always succeeds.
		 */
		if ((count & LKC_MASK) == 0)
			panic("lockmgr: LK_RELEASE: no lock held");

		if (count & LKC_EXCL) {
			if (lkp->lk_lockholder != LK_KERNTHREAD &&
			    lkp->lk_lockholder != td) {
				panic("lockmgr: pid %d, not exclusive "
				      "lock holder thr %p/%p unlocking",
				      (td->td_proc ? td->td_proc->p_pid : -1),
				      td, lkp->lk_lockholder);
			}
			if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
				/*
				 * Last exclusive count is being released
				 */
				otd = lkp->lk_lockholder;
				lkp->lk_lockholder = NULL;
				if (!atomic_cmpset_int(&lkp->lk_count, count,
					      (count - 1) &
					      ~(LKC_EXCL | LKC_EXREQ |
						LKC_SHREQ | LKC_CANCEL))) {
					lkp->lk_lockholder = otd;
					goto again;
				}
				if (count & (LKC_EXREQ|LKC_SHREQ))
					wakeup(lkp);
				/* success */
			} else if ((count & (LKC_UPREQ|LKC_MASK)) ==
				   (LKC_UPREQ | 1)) {
				/*
				 * Last exclusive count is being released but
				 * an upgrade request is present, automatically
				 * grant an exclusive state to the owner of
				 * the upgrade request.
				 */
				otd = lkp->lk_lockholder;
				lkp->lk_lockholder = NULL;
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       (count & ~LKC_UPREQ) |
						       LKC_UPGRANT)) {
					lkp->lk_lockholder = otd;
					goto again;
				}
				wakeup(lkp);
				/* success */
			} else {
				otd = lkp->lk_lockholder;
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       count - 1)) {
					goto again;
				}
				/* success */
			}
			/* success */
			if (otd != LK_KERNTHREAD)
				COUNT(td, -1);
		} else {
			if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
				/*
				 * Last shared count is being released.
				 */
				if (!atomic_cmpset_int(&lkp->lk_count, count,
					      (count - 1) &
					      ~(LKC_EXREQ | LKC_SHREQ |
						LKC_CANCEL))) {
					goto again;
				}
				if (count & (LKC_EXREQ|LKC_SHREQ))
					wakeup(lkp);
				/* success */
			} else if ((count & (LKC_UPREQ|LKC_MASK)) ==
				   (LKC_UPREQ | 1)) {
				/*
				 * Last shared count is being released but
				 * an upgrade request is present, automatically
				 * grant an exclusive state to the owner of
				 * the upgrade request.  Masked count
				 * remains 1.
				 */
				if (!atomic_cmpset_int(&lkp->lk_count, count,
					      (count & ~(LKC_UPREQ |
							 LKC_CANCEL)) |
					      LKC_EXCL | LKC_UPGRANT)) {
					goto again;
				}
				wakeup(lkp);
			} else {
				if (!atomic_cmpset_int(&lkp->lk_count, count,
						       count - 1)) {
					goto again;
				}
			}
			/* success */
			COUNT(td, -1);
		}
		break;

	case LK_CANCEL_BEG:
		/*
		 * Start canceling blocked or future requestors.  Such
		 * requestors must use CANCELABLE.  Don't waste time issuing
		 * a wakeup if nobody is pending.
		 */
		KKASSERT((count & LKC_CANCEL) == 0);	/* disallowed case */
		KKASSERT((count & LKC_MASK) != 0);	/* issue w/lock held */
		if (!atomic_cmpset_int(&lkp->lk_count,
				       count, count | LKC_CANCEL)) {
			goto again;
		}
		if (count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) {
			wakeup(lkp);
		}
		break;

	case LK_CANCEL_END:
		atomic_clear_int(&lkp->lk_count, LKC_CANCEL);
		break;

	default:
		panic("lockmgr: unknown locktype request %d",
		      flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	return (error);
}

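/*
 * Illustrative sketch (not part of the original file): typical lockmgr()
 * call patterns for the flag combinations handled above.  The lock
 * "example_lk" and the surrounding function are hypothetical and exist
 * only to show the expected usage; the block is compiled out.
 */
#if 0
static struct lock example_lk;

static void
example_lockmgr_usage(void)
{
	/* Blocking exclusive acquisition, then release */
	lockmgr(&example_lk, LK_EXCLUSIVE);
	lockmgr(&example_lk, LK_RELEASE);

	/* Non-blocking shared attempt; returns EBUSY if it would sleep */
	if (lockmgr(&example_lk, LK_SHARED | LK_NOWAIT) == 0) {
		/* ... read-only work under the shared lock ... */
		lockmgr(&example_lk, LK_RELEASE);
	}

	/* Shared -> exclusive upgrade; may temporarily release the lock */
	lockmgr(&example_lk, LK_SHARED);
	if (lockmgr(&example_lk, LK_UPGRADE) == 0)
		lockmgr(&example_lk, LK_RELEASE);
	/* on upgrade failure the lock has already been released */
}
#endif
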
/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
	int count;

	for (;;) {
		count = lkp->lk_count;
		cpu_ccfence();
		if (count & LKC_UPGRANT) {
			/*
			 * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
			 * another thread might own UPREQ.  Clear UPGRANT
			 * and release the granted lock.
			 */
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPGRANT)) {
				lockmgr(lkp, LK_RELEASE);
				break;
			}
		} else if (count & LKC_EXCL) {
			/*
			 * Clear the UPREQ we still own.  Nobody to wakeup
			 * here because there is an existing exclusive
			 * holder.
			 */
			KKASSERT(count & LKC_UPREQ);
			KKASSERT((count & LKC_MASK) > 0);
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPREQ)) {
				wakeup(lkp);
				break;
			}
		} else if (count & LKC_EXREQ) {
			/*
			 * Clear the UPREQ we still own.  We cannot wakeup any
			 * shared waiters because there is an exclusive
			 * request pending.
			 */
			KKASSERT(count & LKC_UPREQ);
			KKASSERT((count & LKC_MASK) > 0);
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count & ~LKC_UPREQ)) {
				break;
			}
		} else {
			/*
			 * Clear the UPREQ we still own.  Wakeup any shared
			 * waiters.
			 */
			KKASSERT(count & LKC_UPREQ);
			KKASSERT((count & LKC_MASK) > 0);
			if (atomic_cmpset_int(&lkp->lk_count, count,
					      count &
					      ~(LKC_UPREQ | LKC_SHREQ))) {
				if (count & LKC_SHREQ)
					wakeup(lkp);
				break;
			}
		}
		/* retry */
	}
}

void
lockmgr_kernproc(struct lock *lp)
{
	struct thread *td __debugvar = curthread;

	if (lp->lk_lockholder != LK_KERNTHREAD) {
		KASSERT(lp->lk_lockholder == td,
		    ("lockmgr_kernproc: lock not owned by curthread %p", td));
		lp->lk_lockholder = LK_KERNTHREAD;
		COUNT(td, -1);
	}
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_count = 0;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOTHREAD;
}

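/*
 * Illustrative sketch (not part of the original file): initializing a lock
 * before first use.  The lock and function names are hypothetical; the
 * block is compiled out.
 */
#if 0
static struct lock example_init_lk;

static void
example_lock_setup(void)
{
	/*
	 * "exlk" is the wmesg shown while sleeping on the lock, timo 0
	 * means no default timeout.  LK_CANRECURSE could be passed as the
	 * flags argument if the holder must be allowed to re-enter.
	 */
	lockinit(&example_init_lk, "exlk", 0, 0);
}
#endif
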
/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
}

/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
	KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;
	int count;

	count = lkp->lk_count;
	cpu_ccfence();

	if (count & LKC_EXCL) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (count & LKC_MASK) {
		lock_type = LK_SHARED;
	}
	return (lock_type);
}

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
	thread_t td = curthread;
	int count;

	count = lkp->lk_count;
	cpu_ccfence();

	if (count & LKC_EXCL)
		return(lkp->lk_lockholder == td);
	else
		return((count & LKC_MASK) != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
	return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
	return(lkp->lk_count & LKC_MASK);
}

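/*
 * Illustrative sketch (not part of the original file): lockcountnb() is
 * typically used inside assertions to verify that a lock is (or is not)
 * held at a particular point.  The helper below is hypothetical and the
 * block is compiled out.
 */
#if 0
static void
example_assert_held(struct lock *lkp)
{
	/* Panics (via KKASSERT) if the lock has no holders at this point */
	KKASSERT(lockcountnb(lkp) > 0);
}
#endif
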
/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
	struct thread *td = lkp->lk_lockholder;
	struct proc *p;
	int count;

	count = lkp->lk_count;
	cpu_ccfence();

	if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
		p = td->td_proc;
	else
		p = NULL;

	if (count & LKC_EXCL) {
		kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
			lkp->lk_wmesg, count, td,
			p ? p->p_pid : -99);
	} else if (count & LKC_MASK) {
		kprintf(" lock type %s: SHARED (count %08x)",
			lkp->lk_wmesg, count);
	} else {
		kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
	}
	if (count & (LKC_EXREQ|LKC_SHREQ))
		kprintf(" with waiters\n");
	else
		kprintf("\n");
}

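/*
 * Illustrative sketch (not part of the original file): lockmgr_printinfo()
 * is typically called from a filesystem's VOP_PRINT-style routine to dump
 * the state of an embedded lock.  The structure and function names below
 * are hypothetical and the block is compiled out.
 */
#if 0
struct example_node {
	struct lock	n_lock;
};

static void
example_node_print(struct example_node *np)
{
	kprintf("example node %p", np);
	lockmgr_printinfo(&np->n_lock);
}
#endif
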
void
lock_sysinit(struct lock_args *arg)
{
	lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}

#ifdef DEBUG_CANCEL_LOCKS

static
int
sysctl_cancel_lock(SYSCTL_HANDLER_ARGS)
{
	int error;

	if (req->newptr) {
		SYSCTL_XUNLOCK();
		lockmgr(&cancel_lk, LK_EXCLUSIVE);
		kprintf("x");
		error = tsleep(&error, PCATCH, "canmas", hz * 5);
		lockmgr(&cancel_lk, LK_CANCEL_BEG);
		kprintf("y");
		error = tsleep(&error, PCATCH, "canmas", hz * 5);
		kprintf("z");
		lockmgr(&cancel_lk, LK_RELEASE);
		SYSCTL_XLOCK();
		SYSCTL_OUT(req, &error, sizeof(error));
	}
	error = 0;

	return error;
}

static
int
sysctl_cancel_test(SYSCTL_HANDLER_ARGS)
{
	int error;

	if (req->newptr) {
		error = lockmgr(&cancel_lk, LK_EXCLUSIVE|LK_CANCELABLE);
		if (error == 0)
			lockmgr(&cancel_lk, LK_RELEASE);
		SYSCTL_OUT(req, &error, sizeof(error));
		kprintf("test %d\n", error);
	}

	return 0;
}

#endif