/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 * Copyright (C) 2013
 *	Matthew Dillon, All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

static void undo_upreq(struct lock *lkp);

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
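
/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * a lock is initialized once with lockinit() and then acquired/released
 * through lockmgr() with one of the LK_* request types.  The "foo_lock"
 * and "foo_data" names below are illustrative only.
 *
 *	static struct lock foo_lock;
 *	static int foo_data;
 *
 *	lockinit(&foo_lock, "foolck", 0, 0);
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);	(blocks until granted)
 *	foo_data = 42;				(protected update)
 *	lockmgr(&foo_lock, LK_RELEASE);
 *
 *	if (lockmgr(&foo_lock, LK_SHARED | LK_NOWAIT) == 0) {
 *		(read foo_data under the shared lock)
 *		lockmgr(&foo_lock, LK_RELEASE);
 *	} else {
 *		(EBUSY: the lock could not be obtained without blocking)
 *	}
 */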

#ifdef DEBUG_LOCKS
#define COUNT(td, x) (td)->td_locks += (x)
#else
#define COUNT(td, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

/*
 * Set, change, or release a lock.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags)
#else
debuglockmgr(struct lock *lkp, u_int flags,
             const char *name, const char *file, int line)
#endif
{
        thread_t td;
        thread_t otd;
        int error;
        int extflags;
        int count;
        int pflags;
        int wflags;
        int timo;
#ifdef DEBUG_LOCKS
        int i;
#endif

        error = 0;

        if (mycpu->gd_intr_nesting_level &&
            (flags & LK_NOWAIT) == 0 &&
            (flags & LK_TYPE_MASK) != LK_RELEASE &&
            panic_cpu_gd != mycpu
        ) {

#ifndef DEBUG_LOCKS
                panic("lockmgr %s from %p: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, ((int **)&lkp)[-1]);
#else
                panic("lockmgr %s from %s:%d: called from interrupt, ipi, "
                      "or hard code section",
                      lkp->lk_wmesg, file, line);
#endif
        }

#ifdef DEBUG_LOCKS
        if (mycpu->gd_spinlocks && ((flags & LK_NOWAIT) == 0)) {
                panic("lockmgr %s from %s:%d: called with %d spinlocks held",
                      lkp->lk_wmesg, file, line, mycpu->gd_spinlocks);
        }
#endif

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
        td = curthread;

again:
        count = lkp->lk_count;
        cpu_ccfence();

        switch (flags & LK_TYPE_MASK) {
        case LK_SHARED:
                /*
                 * Shared lock critical path case
                 */
                if ((count & (LKC_EXREQ|LKC_UPREQ|LKC_EXCL)) == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count,
                                              count, count + 1)) {
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * If the caller already holds the lock exclusively then
                 * we silently obtain another count on the exclusive lock.
                 *
                 * WARNING!  The old FreeBSD behavior was to downgrade,
                 *           but this creates a problem when recursions
                 *           return to the caller and the caller expects
                 *           its original exclusive lock to remain exclusively
                 *           locked.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * Slow path
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                wflags = (td->td_flags & TDF_DEADLKTREAT) ?
                                LKC_EXCL : (LKC_EXCL|LKC_EXREQ|LKC_UPREQ);

                /*
                 * Block while the lock is held exclusively or, conditionally,
                 * if other threads are trying to obtain an exclusive lock or
                 * upgrade to one.
                 */
                if (count & wflags) {
                        if (extflags & LK_NOWAIT) {
                                error = EBUSY;
                                break;
                        }
                        tsleep_interlock(lkp, pflags);
                        if (!atomic_cmpset_int(&lkp->lk_count, count,
                                               count | LKC_SHREQ)) {
                                goto again;
                        }

                        mycpu->gd_cnt.v_lock_name[0] = 'S';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }
                        goto again;
                }

                /*
                 * Otherwise we can bump the count
                 */
                if (atomic_cmpset_int(&lkp->lk_count, count, count + 1)) {
                        COUNT(td, 1);
                        break;
                }
                goto again;

        case LK_EXCLUSIVE:
                /*
                 * Exclusive lock critical path.
                 */
                if (count == 0) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              LKC_EXCL | (count + 1))) {
                                lkp->lk_lockholder = td;
                                COUNT(td, 1);
                                break;
                        }
                        goto again;
                }

                /*
                 * Recursive lock if we already hold it exclusively.
                 */
                if (lkp->lk_lockholder == td) {
                        KKASSERT(count & LKC_EXCL);
                        if ((extflags & LK_CANRECURSE) == 0) {
                                if (extflags & LK_NOWAIT) {
                                        error = EBUSY;
                                        break;
                                }
                                panic("lockmgr: locking against myself");
                        }
                        atomic_add_int(&lkp->lk_count, 1);
                        COUNT(td, 1);
                        break;
                }

                /*
                 * We will block, handle LK_NOWAIT
                 */
                if (extflags & LK_NOWAIT) {
                        error = EBUSY;
                        break;
                }

                /*
                 * Wait until we can obtain the exclusive lock.  EXREQ is
                 * automatically cleared when all current holders release
                 * so if we abort the operation we can safely leave it set.
                 * There might be other exclusive requesters.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;

                tsleep_interlock(lkp, pflags);
                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                       count | LKC_EXREQ)) {
                        goto again;
                }

                mycpu->gd_cnt.v_lock_name[0] = 'X';
                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                        lkp->lk_wmesg,
                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                ++mycpu->gd_cnt.v_lock_colls;

                error = tsleep(lkp, pflags | PINTERLOCKED,
                               lkp->lk_wmesg, timo);
                if (error)
                        break;
                if (extflags & LK_SLEEPFAIL) {
                        error = ENOLCK;
                        break;
                }
                goto again;

        case LK_DOWNGRADE:
                /*
                 * Downgrade an exclusive lock into a shared lock.  All
                 * counts on a recursive exclusive lock become shared.
                 *
                 * This function always succeeds.
                 */
                if (lkp->lk_lockholder != td ||
                    (count & (LKC_EXCL|LKC_MASK)) != (LKC_EXCL|1)) {
                        panic("lockmgr: not holding exclusive lock");
                }

#ifdef DEBUG_LOCKS
                for (i = 0; i < LOCKMGR_DEBUG_ARRAY_SIZE; i++) {
                        if (td->td_lockmgr_stack[i] == lkp &&
                            td->td_lockmgr_stack_id[i] > 0
                        ) {
                                td->td_lockmgr_stack_id[i]--;
                                break;
                        }
                }
#endif
                /*
                 * NOTE! Must NULL-out lockholder before releasing LKC_EXCL.
                 */
                otd = lkp->lk_lockholder;
                lkp->lk_lockholder = NULL;
                if (atomic_cmpset_int(&lkp->lk_count, count,
                                      count & ~(LKC_EXCL|LKC_SHREQ))) {
                        if (count & LKC_SHREQ)
                                wakeup(lkp);
                        break;
                }
                lkp->lk_lockholder = otd;
                goto again;

        case LK_EXCLUPGRADE:
                /*
                 * Upgrade from a single shared lock to an exclusive lock.
                 *
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.  The shared lock is released on
                 * failure.
                 */
                if (count & LKC_UPREQ) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }
                /* fall through into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  This can cause
                 * the lock to be temporarily released and stolen by other
                 * threads.  LK_SLEEPFAIL or LK_NOWAIT may be used to detect
                 * this case, or use LK_EXCLUPGRADE.
                 *
                 * If the lock is already exclusively owned by us, this
                 * operation is a NOP.
                 *
                 * If we return an error (even NOWAIT), the current lock will
                 * be released.
                 *
                 * Start with the critical path.
                 */
                if ((count & (LKC_UPREQ|LKC_EXCL|LKC_MASK)) == 1) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count | LKC_EXCL)) {
                                lkp->lk_lockholder = td;
                                break;
                        }
                        goto again;
                }

                /*
                 * If we already hold the lock exclusively this operation
                 * succeeds and is a NOP.
                 */
                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder == td)
                                break;
                        panic("lockmgr: upgrade unowned lock");
                }
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: upgrade unowned lock");

                /*
                 * We cannot upgrade without blocking at this point.
                 */
                if (extflags & LK_NOWAIT) {
                        flags = LK_RELEASE;
                        error = EBUSY;
                        goto again;
                }

                /*
                 * Release the shared lock and request the upgrade.
                 */
                pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                tsleep_interlock(lkp, pflags);
                wflags = (count & LKC_UPREQ) ? LKC_EXREQ : LKC_UPREQ;

                /*
                 * If someone else owns UPREQ and this transition would
                 * allow it to be granted, we have to grant it.  Otherwise
                 * we release the shared lock.
                 */
                if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1)) {
                        wflags |= LKC_EXCL | LKC_UPGRANT;
                        wflags |= count;
                        wflags &= ~LKC_UPREQ;
                } else {
                        wflags |= (count - 1);
                }

                if (atomic_cmpset_int(&lkp->lk_count, count, wflags)) {
                        COUNT(td, -1);

                        /*
                         * Must wakeup the thread granted the upgrade.
                         */
                        if ((count & (LKC_UPREQ|LKC_MASK)) == (LKC_UPREQ | 1))
                                wakeup(lkp);

                        mycpu->gd_cnt.v_lock_name[0] = 'U';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                lkp->lk_wmesg,
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(lkp, pflags | PINTERLOCKED,
                                       lkp->lk_wmesg, timo);
                        if (error)
                                break;
                        if (extflags & LK_SLEEPFAIL) {
                                error = ENOLCK;
                                break;
                        }

                        /*
                         * Refactor to either LK_EXCLUSIVE or LK_WAITUPGRADE,
                         * depending on whether we were able to acquire the
                         * LKC_UPREQ bit.
                         */
                        if (count & LKC_UPREQ)
                                flags = LK_EXCLUSIVE;   /* someone else */
                        else
                                flags = LK_WAITUPGRADE; /* we own the bit */
                }
                goto again;

        case LK_WAITUPGRADE:
                /*
                 * We own the LKC_UPREQ bit, wait until we are granted the
                 * exclusive lock (LKC_UPGRANT is set).
                 *
                 * IF THE OPERATION FAILS (tsleep error or tsleep+LK_SLEEPFAIL),
                 * we have to undo the upgrade request and clean up any lock
                 * that might have been granted via a race.
                 */
                if (count & LKC_UPGRANT) {
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lkp->lk_lockholder = td;
                                KKASSERT(count & LKC_EXCL);
                                break;
                        }
                        /* retry */
                } else {
                        pflags = (extflags & LK_PCATCH) ? PCATCH : 0;
                        timo = (extflags & LK_TIMELOCK) ? lkp->lk_timo : 0;
                        tsleep_interlock(lkp, pflags);
                        if (atomic_cmpset_int(&lkp->lk_count, count, count)) {

                                mycpu->gd_cnt.v_lock_name[0] = 'U';
                                strncpy(mycpu->gd_cnt.v_lock_name + 1,
                                        lkp->lk_wmesg,
                                        sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                                ++mycpu->gd_cnt.v_lock_colls;

                                error = tsleep(lkp, pflags | PINTERLOCKED,
                                               lkp->lk_wmesg, timo);
                                if (error) {
                                        undo_upreq(lkp);
                                        break;
                                }
                                if (extflags & LK_SLEEPFAIL) {
                                        error = ENOLCK;
                                        undo_upreq(lkp);
                                        break;
                                }
                        }
                        /* retry */
                }
                goto again;

        case LK_RELEASE:
                /*
                 * Release the currently held lock.  If releasing the current
                 * lock as part of an error return, error will ALREADY be
                 * non-zero.
                 *
                 * When releasing the last lock we automatically transition
                 * LKC_UPREQ to LKC_EXCL|1.
                 *
                 * WARNING! We cannot detect when there are multiple exclusive
                 *          requests pending.  We clear EXREQ unconditionally
                 *          on the 1->0 transition so it is possible for
                 *          shared requests to race the next exclusive
                 *          request.
                 *
                 * Always succeeds.
                 */
                if ((count & LKC_MASK) == 0)
                        panic("lockmgr: LK_RELEASE: no lock held");

                if (count & LKC_EXCL) {
                        if (lkp->lk_lockholder != LK_KERNTHREAD &&
                            lkp->lk_lockholder != td) {
                                panic("lockmgr: pid %d, not exclusive "
                                      "lock holder thr %p/%p unlocking",
                                      (td->td_proc ? td->td_proc->p_pid : -1),
                                      td, lkp->lk_lockholder);
                        }
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last exclusive count is being released
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                              ~(LKC_EXCL|LKC_EXREQ|LKC_SHREQ))) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last exclusive count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                otd = lkp->lk_lockholder;
                                lkp->lk_lockholder = NULL;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~LKC_UPREQ) |
                                              LKC_UPGRANT)) {
                                        lkp->lk_lockholder = otd;
                                        goto again;
                                }
                                wakeup(lkp);
                                /* success */
                        } else {
                                otd = lkp->lk_lockholder;
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                                /* success */
                        }
                        /* success */
                        if (otd != LK_KERNTHREAD)
                                COUNT(td, -1);
                } else {
                        if ((count & (LKC_UPREQ|LKC_MASK)) == 1) {
                                /*
                                 * Last shared count is being released.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count - 1) &
                                              ~(LKC_EXREQ|LKC_SHREQ))) {
                                        goto again;
                                }
                                if (count & (LKC_EXREQ|LKC_SHREQ))
                                        wakeup(lkp);
                                /* success */
                        } else if ((count & (LKC_UPREQ|LKC_MASK)) ==
                                   (LKC_UPREQ | 1)) {
                                /*
                                 * Last shared count is being released but
                                 * an upgrade request is present, automatically
                                 * grant an exclusive state to the owner of
                                 * the upgrade request.
                                 */
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                              (count & ~LKC_UPREQ) |
                                              LKC_EXCL | LKC_UPGRANT)) {
                                        goto again;
                                }
                                wakeup(lkp);
                        } else {
                                if (!atomic_cmpset_int(&lkp->lk_count, count,
                                                       count - 1)) {
                                        goto again;
                                }
                        }
                        /* success */
                        COUNT(td, -1);
                }
                break;

        default:
                panic("lockmgr: unknown locktype request %d",
                      flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        return (error);
}
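
/*
 * A sketch of how a caller might cope with the LK_UPGRADE caveat noted
 * above (the shared lock can be temporarily released during the upgrade).
 * This is an illustrative, hypothetical caller; "foo_lock" and
 * foo_revalidate() are not part of this file.
 *
 *	lockmgr(&foo_lock, LK_SHARED);
 *	...
 *	if (lockmgr(&foo_lock, LK_UPGRADE | LK_SLEEPFAIL) != 0) {
 *		(on failure the shared lock has already been released;
 *		 start over with a fresh exclusive acquisition and
 *		 re-check any state observed under the shared lock)
 *		lockmgr(&foo_lock, LK_EXCLUSIVE);
 *		foo_revalidate();
 *	}
 *	... exclusive access ...
 *	lockmgr(&foo_lock, LK_RELEASE);
 */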

/*
 * Undo an upgrade request
 */
static
void
undo_upreq(struct lock *lkp)
{
        int count;

        for (;;) {
                count = lkp->lk_count;
                cpu_ccfence();
                if (count & LKC_UPGRANT) {
                        /*
                         * UPREQ was shifted to UPGRANT.  We own UPGRANT now,
                         * another thread might own UPREQ.  Clear UPGRANT
                         * and release the granted lock.
                         */
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPGRANT)) {
                                lockmgr(lkp, LK_RELEASE);
                                break;
                        }
                } else if (count & LKC_EXCL) {
                        /*
                         * Clear the UPREQ we still own.  Nobody to wakeup
                         * here because there is an existing exclusive
                         * holder.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                wakeup(lkp);
                                break;
                        }
                } else if (count & LKC_EXREQ) {
                        /*
                         * Clear the UPREQ we still own.  We cannot wakeup any
                         * shared waiters because there is an exclusive
                         * request pending.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count & ~LKC_UPREQ)) {
                                break;
                        }
                } else {
                        /*
                         * Clear the UPREQ we still own.  Wakeup any shared
                         * waiters.
                         */
                        KKASSERT(count & LKC_UPREQ);
                        KKASSERT((count & LKC_MASK) > 0);
                        if (atomic_cmpset_int(&lkp->lk_count, count,
                                              count &
                                              ~(LKC_UPREQ | LKC_SHREQ))) {
                                if (count & LKC_SHREQ)
                                        wakeup(lkp);
                                break;
                        }
                }
                /* retry */
        }
}

/*
 * Transfer the exclusively held lock to the kernel (LK_KERNTHREAD) so
 * that it may later be released by a different thread.
 */
void
lockmgr_kernproc(struct lock *lp)
{
        struct thread *td __debugvar = curthread;

        if (lp->lk_lockholder != LK_KERNTHREAD) {
                KASSERT(lp->lk_lockholder == td,
                    ("lockmgr_kernproc: lock not owned by curthread %p", td));
                lp->lk_lockholder = LK_KERNTHREAD;
                COUNT(td, -1);
        }
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_count = 0;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOTHREAD;
}
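
/*
 * Illustration (hypothetical caller, not part of this file): flags passed
 * to lockinit() are folded into every subsequent lockmgr() request via
 * lk_flags, so a lock initialized with LK_CANRECURSE permits recursive
 * exclusive acquisition without passing the flag on each call.
 *
 *	lockinit(&foo_lock, "foolck", 0, LK_CANRECURSE);
 *
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);
 *	lockmgr(&foo_lock, LK_EXCLUSIVE);	(recursion allowed)
 *	lockmgr(&foo_lock, LK_RELEASE);
 *	lockmgr(&foo_lock, LK_RELEASE);
 */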

/*
 * Reinitialize a lock that is being reused for a different purpose, but
 * which may have pending (blocked) threads sitting on it.  The caller
 * must already hold the interlock.
 */
void
lockreinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
}

/*
 * De-initialize a lock.  The structure must no longer be used by anyone.
 */
void
lockuninit(struct lock *lkp)
{
        KKASSERT((lkp->lk_count & (LKC_EXREQ|LKC_SHREQ|LKC_UPREQ)) == 0);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
        int lock_type = 0;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL) {
                if (td == NULL || lkp->lk_lockholder == td)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (count & LKC_MASK) {
                lock_type = LK_SHARED;
        }
        return (lock_type);
}
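
/*
 * Typical use (hypothetical caller): asserting that the current thread
 * holds "foo_lock" exclusively before touching state it protects.
 *
 *	KKASSERT(lockstatus(&foo_lock, curthread) == LK_EXCLUSIVE);
 */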

/*
 * Return non-zero if the caller owns the lock shared or exclusive.
 * We can only guess re: shared locks.
 */
int
lockowned(struct lock *lkp)
{
        thread_t td = curthread;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (count & LKC_EXCL)
                return(lkp->lk_lockholder == td);
        else
                return((count & LKC_MASK) != 0);
}

/*
 * Determine the number of holders of a lock.
 *
 * The non-blocking version can usually be used for assertions.
 */
int
lockcount(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}

int
lockcountnb(struct lock *lkp)
{
        return(lkp->lk_count & LKC_MASK);
}
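
/*
 * Typical assertion use of the non-blocking count (hypothetical caller):
 * verifying a lock is no longer held before tearing it down.
 *
 *	KKASSERT(lockcountnb(&foo_lock) == 0);
 *	lockuninit(&foo_lock);
 */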

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{
        struct thread *td = lkp->lk_lockholder;
        struct proc *p;
        int count;

        count = lkp->lk_count;
        cpu_ccfence();

        if (td && td != LK_KERNTHREAD && td != LK_NOTHREAD)
                p = td->td_proc;
        else
                p = NULL;

        if (count & LKC_EXCL) {
                kprintf(" lock type %s: EXCLUS (count %08x) by td %p pid %d",
                        lkp->lk_wmesg, count, td,
                        p ? p->p_pid : -99);
        } else if (count & LKC_MASK) {
                kprintf(" lock type %s: SHARED (count %08x)",
                        lkp->lk_wmesg, count);
        } else {
                kprintf(" lock type %s: NOTHELD", lkp->lk_wmesg);
        }
        if (count & (LKC_EXREQ|LKC_SHREQ))
                kprintf(" with waiters\n");
        else
                kprintf("\n");
}

/*
 * Initialize a lock from a lock_args descriptor.
 */
void
lock_sysinit(struct lock_args *arg)
{
        lockinit(arg->la_lock, arg->la_desc, 0, arg->la_flags);
}