/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2015, Joyent, Inc.
 */
#include "thr_uberdata.h"
#include <sys/rtpriocntl.h>
#if defined(THREAD_DEBUG)
#define	INCR32(x)	(((x) != UINT32_MAX)? (x)++ : 0)
#define	INCR(x)		((x)++)
#define	DECR(x)		((x)--)
#define	MAXINCR(m, x)	((m < ++x)? (m = x) : 0)
/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);
/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}
/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}
/*
 * The default spin count of 1000 is experimentally determined.
 * On sun4u machines with any number of processors it could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variable:
 *	_THREAD_ADAPTIVE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_queue_verify = 0;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * We try harder to acquire queue locks by spinning.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_queue_spin = 10000;
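
/*
 * Illustrative sketch (not the actual libc startup code): one plausible way
 * an environment override such as _THREAD_ADAPTIVE_SPIN or _THREAD_QUEUE_SPIN
 * could be parsed and clamped to the documented range [0 .. 1,000,000].
 * The helper name and its use here are hypothetical; it is compiled out.
 */
#if 0
#include <stdlib.h>

static int
env_spin_count(const char *name, int def)
{
	const char *val = getenv(name);
	long count;

	if (val == NULL)
		return (def);		/* no override present */
	count = strtol(val, NULL, 10);
	if (count < 0)			/* clamp to the documented range */
		count = 0;
	if (count > 1000000)
		count = 1000000;
	return ((int)count);
}

/* e.g.: thread_adaptive_spin = env_spin_count("_THREAD_ADAPTIVE_SPIN", 1000); */
#endif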
#define	ALL_ATTRIBUTES				\
	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
	LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *	LOCK_RECURSIVE, LOCK_ERRORCHECK, LOCK_PRIO_INHERIT,
 *	LOCK_PRIO_PROTECT, LOCK_ROBUST
 */
#pragma weak _mutex_init = mutex_init
int
mutex_init(mutex_t *mp, int type, void *arg)
{
	int basetype = (type & ~ALL_ATTRIBUTES);
	const pcclass_t *pccp;
	int error = 0;
	int ceil;

	if (basetype == USYNC_PROCESS_ROBUST) {
		/*
		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
		 * retain the USYNC_PROCESS_ROBUST flag so we can return
		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
		 * mutexes will ever draw ELOCKUNMAPPED).
		 */
		type |= (USYNC_PROCESS | LOCK_ROBUST);
		basetype = USYNC_PROCESS;
	}

	if (type & LOCK_PRIO_PROTECT)
		pccp = get_info_by_policy(SCHED_FIFO);
	if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
	    ((type & LOCK_PRIO_PROTECT) &&
	    ((ceil = *(int *)arg) < pccp->pcc_primin ||
	    ceil > pccp->pcc_primax))) {
		error = EINVAL;
	} else if (type & LOCK_ROBUST) {
		/*
		 * Callers of mutex_init() with the LOCK_ROBUST attribute
		 * are required to pass an initially all-zero mutex.
		 * Multiple calls to mutex_init() are allowed; all but
		 * the first return EBUSY.  A call to mutex_init() is
		 * allowed to make an inconsistent robust lock consistent
		 * (for historical usage, even though the proper interface
		 * for this is mutex_consistent()).  Note that we use
		 * atomic_or_16() to set the LOCK_INITED flag so as
		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
		 */
		if (!(mp->mutex_flag & LOCK_INITED)) {
			mp->mutex_type = (uint8_t)type;
			atomic_or_16(&mp->mutex_flag, LOCK_INITED);
			mp->mutex_magic = MUTEX_MAGIC;
		} else if (type != mp->mutex_type ||
		    ((type & LOCK_PRIO_PROTECT) &&
		    mp->mutex_ceiling != ceil)) {
			error = EINVAL;
		} else if (mutex_consistent(mp) != 0) {
			error = EBUSY;
		}
		/* register a process robust mutex with the kernel */
		if (basetype == USYNC_PROCESS)
			register_lock(mp);
	} else {
		(void) memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}

	if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
		mp->mutex_ceiling = ceil;
	}

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their mutexes
	 * (and don't check the return code from mutex_init),
	 * we put it here, after initializing the mutex regardless.
	 */
	if (error == 0 &&
	    ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		error = EINVAL;

	return (error);
}
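
/*
 * Illustrative usage sketch (not part of the original source, compiled out):
 * initializing a process-shared robust mutex and a priority-ceiling mutex
 * with mutex_init().  The ceiling value is only an example; a real robust
 * process-shared lock would live in shared memory, not on the stack.
 */
#if 0
static void
example_mutex_init_usage(void)
{
	mutex_t robust_lock;	/* LOCK_ROBUST requires an all-zero mutex */
	mutex_t ceiling_lock;
	int ceil = 30;		/* example; must lie in the RT priority range */

	(void) memset(&robust_lock, 0, sizeof (robust_lock));
	if (mutex_init(&robust_lock, USYNC_PROCESS | LOCK_ROBUST, NULL) != 0) {
		/* EINVAL (bad type) or EBUSY (already initialized) */
	}

	if (mutex_init(&ceiling_lock, USYNC_THREAD | LOCK_PRIO_PROTECT,
	    &ceil) != 0) {
		/* a ceiling outside [pcc_primin .. pcc_primax] draws EINVAL */
	}
}
#endif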
/*
 * Delete mp from list of ceiling mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	for (mcpp = &self->ul_mxchain;
	    (mcp = *mcpp) != NULL;
	    mcpp = &mcp->mxchain_next) {
		if (mcp->mxchain_mx == mp) {
			*mcpp = mcp->mxchain_next;
			lfree(mcp, sizeof (*mcp));
			return (mcpp == &self->ul_mxchain);
		}
	}
	return (0);
}
/*
 * Add mp to the list of ceiling mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}
/*
 * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
 */
static void
set_rt_priority(ulwp_t *self, int prio)
{
	pcparms_t pcparm;

	pcparm.pc_cid = self->ul_rtclassid;
	((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
	((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
	(void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
}

/*
 * Inherit priority from ceiling.
 * This changes the effective priority, not the assigned priority.
 */
void
_ceil_prio_inherit(int prio)
{
	ulwp_t *self = curthread;

	self->ul_epri = prio;
	set_rt_priority(self, prio);
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp = self->ul_mxchain;
	int prio;

	if (mcp == NULL) {
		prio = self->ul_pri;
		self->ul_epri = 0;
	} else {
		prio = mcp->mxchain_mx->mutex_ceiling;
		self->ul_epri = prio;
	}
	set_rt_priority(self, prio);
}
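
/*
 * Illustrative sketch (POSIX-level view, not part of this file, compiled
 * out): the ceiling machinery above backs PTHREAD_PRIO_PROTECT mutexes.
 * An application would normally set the protocol and ceiling through the
 * standard pthread attribute interfaces rather than calling these helpers.
 */
#if 0
#include <pthread.h>

static int
example_make_ceiling_mutex(pthread_mutex_t *mp, int ceiling)
{
	pthread_mutexattr_t attr;
	int error;

	(void) pthread_mutexattr_init(&attr);
	(void) pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	(void) pthread_mutexattr_setprioceiling(&attr, ceiling);
	error = pthread_mutex_init(mp, &attr);
	(void) pthread_mutexattr_destroy(&attr);
	return (error);
}
#endif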
298 * Clear the lock byte. Retain the waiters byte and the spinners byte.
299 * Return the old value of the lock word.
302 clear_lockbyte(volatile uint32_t *lockword
)
309 new = old
& ~LOCKMASK
;
310 } while (atomic_cas_32(lockword
, old
, new) != old
);
316 * Same as clear_lockbyte(), but operates on mutex_lockword64.
317 * The mutex_ownerpid field is cleared along with the lock byte.
320 clear_lockbyte64(volatile uint64_t *lockword64
)
327 new = old
& ~LOCKMASK64
;
328 } while (atomic_cas_64(lockword64
, old
, new) != old
);
334 * Similar to set_lock_byte(), which only tries to set the lock byte.
335 * Here, we attempt to set the lock byte AND the mutex_ownerpid, keeping
336 * the remaining bytes constant. This atomic operation is required for the
337 * correctness of process-shared robust locks, otherwise there would be
338 * a window or vulnerability in which the lock byte had been set but the
339 * mutex_ownerpid had not yet been set. If the process were to die in
340 * this window of vulnerability (due to some other thread calling exit()
341 * or the process receiving a fatal signal), the mutex would be left locked
342 * but without a process-ID to determine which process was holding the lock.
343 * The kernel would then be unable to mark the robust mutex as LOCK_OWNERDEAD
344 * when the process died. For all other cases of process-shared locks, this
345 * operation is just a convenience, for the sake of common code.
347 * This operation requires process-shared robust locks to be properly
348 * aligned on an 8-byte boundary, at least on sparc machines, lest the
349 * operation incur an alignment fault. This is automatic when locks
350 * are declared properly using the mutex_t or pthread_mutex_t data types
351 * and the application does not allocate dynamic memory on less than an
352 * 8-byte boundary. See the 'horrible hack' comments below for cases
353 * dealing with such broken applications.
356 set_lock_byte64(volatile uint64_t *lockword64
, pid_t ownerpid
)
361 old
= *lockword64
& ~LOCKMASK64
;
362 new = old
| ((uint64_t)(uint_t
)ownerpid
<< PIDSHIFT
) | LOCKBYTE64
;
363 if (atomic_cas_64(lockword64
, old
, new) == old
)
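
/*
 * Illustrative sketch using portable C11 atomics (not the code above,
 * compiled out): the same pattern of installing the lock byte and an owner
 * id with a single 64-bit compare-and-swap, so the two can never be
 * observed half-written.  The field layout here (owner id in the upper
 * 32 bits, lock byte in the low byte) is hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

#define	EX_LOCKBYTE	((uint64_t)0xff)	/* example layout only */

static int
example_set_lock_and_owner(_Atomic uint64_t *word, uint32_t owner)
{
	uint64_t old, new;

	do {
		/* expect the lock byte to be clear in the value we swap against */
		old = atomic_load(word) & ~EX_LOCKBYTE;
		new = old | ((uint64_t)owner << 32) | EX_LOCKBYTE;
		if (atomic_compare_exchange_strong(word, &old, new))
			return (0);	/* lock byte and owner installed together */
	} while ((atomic_load(word) & EX_LOCKBYTE) == 0);

	return (-1);			/* someone else holds the lock */
}
#endif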
370 * Increment the spinners count in the mutex lock word.
371 * Return 0 on success. Return -1 if the count would overflow.
374 spinners_incr(volatile uint32_t *lockword
, uint8_t max_spinners
)
381 if (((old
& SPINNERMASK
) >> SPINNERSHIFT
) >= max_spinners
)
383 new = old
+ (1 << SPINNERSHIFT
);
384 } while (atomic_cas_32(lockword
, old
, new) != old
);
390 * Decrement the spinners count in the mutex lock word.
391 * Return the new value of the lock word.
394 spinners_decr(volatile uint32_t *lockword
)
400 new = old
= *lockword
;
401 if (new & SPINNERMASK
)
402 new -= (1 << SPINNERSHIFT
);
403 } while (atomic_cas_32(lockword
, old
, new) != old
);
409 * Non-preemptive spin locks. Used by queue_lock().
410 * No lock statistics are gathered for these locks.
411 * No DTrace probes are provided for these locks.
414 spin_lock_set(mutex_t
*mp
)
416 ulwp_t
*self
= curthread
;
419 if (set_lock_byte(&mp
->mutex_lockw
) == 0) {
420 mp
->mutex_owner
= (uintptr_t)self
;
424 * Spin for a while, attempting to acquire the lock.
426 INCR32(self
->ul_spin_lock_spin
);
427 if (mutex_queuelock_adaptive(mp
) == 0 ||
428 set_lock_byte(&mp
->mutex_lockw
) == 0) {
429 mp
->mutex_owner
= (uintptr_t)self
;
 * Try harder if we were previously at a no preemption level.
435 if (self
->ul_preempt
> 1) {
436 INCR32(self
->ul_spin_lock_spin2
);
437 if (mutex_queuelock_adaptive(mp
) == 0 ||
438 set_lock_byte(&mp
->mutex_lockw
) == 0) {
439 mp
->mutex_owner
= (uintptr_t)self
;
444 * Give up and block in the kernel for the mutex.
446 INCR32(self
->ul_spin_lock_sleep
);
447 (void) ___lwp_mutex_timedlock(mp
, NULL
, self
);
451 spin_lock_clear(mutex_t
*mp
)
453 ulwp_t
*self
= curthread
;
456 if (atomic_swap_32(&mp
->mutex_lockword
, 0) & WAITERMASK
) {
457 (void) ___lwp_mutex_wakeup(mp
, 0);
458 INCR32(self
->ul_spin_lock_wakeup
);
464 * Allocate the sleep queue hash table.
469 ulwp_t
*self
= curthread
;
470 uberdata_t
*udp
= self
->ul_uberdata
;
476 * No locks are needed; we call here only when single-threaded.
478 ASSERT(self
== udp
->ulwp_one
);
479 ASSERT(!udp
->uberflags
.uf_mt
);
480 if ((data
= mmap(NULL
, 2 * QHASHSIZE
* sizeof (queue_head_t
),
481 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
|MAP_ANON
, -1, (off_t
)0))
483 thr_panic("cannot allocate thread queue_head table");
484 udp
->queue_head
= qp
= (queue_head_t
*)data
;
485 for (i
= 0; i
< 2 * QHASHSIZE
; qp
++, i
++) {
486 qp
->qh_type
= (i
< QHASHSIZE
)? MX
: CV
;
487 qp
->qh_lock
.mutex_flag
= LOCK_INITED
;
488 qp
->qh_lock
.mutex_magic
= MUTEX_MAGIC
;
489 qp
->qh_hlist
= &qp
->qh_def_root
;
490 #if defined(THREAD_DEBUG)
497 #if defined(THREAD_DEBUG)
500 * Debugging: verify correctness of a sleep queue.
503 QVERIFY(queue_head_t
*qp
)
505 ulwp_t
*self
= curthread
;
506 uberdata_t
*udp
= self
->ul_uberdata
;
515 ASSERT(qp
>= udp
->queue_head
&& (qp
- udp
->queue_head
) < 2 * QHASHSIZE
);
516 ASSERT(MUTEX_OWNED(&qp
->qh_lock
, self
));
517 for (cnt
= 0, qrp
= qp
->qh_hlist
; qrp
!= NULL
; qrp
= qrp
->qr_next
) {
519 ASSERT((qrp
->qr_head
!= NULL
&& qrp
->qr_tail
!= NULL
) ||
520 (qrp
->qr_head
== NULL
&& qrp
->qr_tail
== NULL
));
522 ASSERT(qp
->qh_hlen
== cnt
&& qp
->qh_hmax
>= cnt
);
523 qtype
= ((qp
- udp
->queue_head
) < QHASHSIZE
)? MX
: CV
;
524 ASSERT(qp
->qh_type
== qtype
);
525 if (!thread_queue_verify
)
527 /* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
528 for (cnt
= 0, qrp
= qp
->qh_hlist
; qrp
!= NULL
; qrp
= qrp
->qr_next
) {
529 for (prev
= NULL
, ulwp
= qrp
->qr_head
; ulwp
!= NULL
;
530 prev
= ulwp
, ulwp
= ulwp
->ul_link
) {
533 ASSERT(prev
== NULL
|| prev
->ul_writer
);
534 ASSERT(ulwp
->ul_qtype
== qtype
);
535 ASSERT(ulwp
->ul_wchan
!= NULL
);
536 ASSERT(ulwp
->ul_sleepq
== qp
);
537 wchan
= ulwp
->ul_wchan
;
538 ASSERT(qrp
->qr_wchan
== wchan
);
539 index
= QUEUE_HASH(wchan
, qtype
);
540 ASSERT(&udp
->queue_head
[index
] == qp
);
542 ASSERT(qrp
->qr_tail
== prev
);
544 ASSERT(qp
->qh_qlen
== cnt
);
547 #else /* THREAD_DEBUG */
551 #endif /* THREAD_DEBUG */
554 * Acquire a queue head.
557 queue_lock(void *wchan
, int qtype
)
559 uberdata_t
*udp
= curthread
->ul_uberdata
;
563 ASSERT(qtype
== MX
|| qtype
== CV
);
566 * It is possible that we could be called while still single-threaded.
567 * If so, we call queue_alloc() to allocate the queue_head[] array.
569 if ((qp
= udp
->queue_head
) == NULL
) {
571 qp
= udp
->queue_head
;
573 qp
+= QUEUE_HASH(wchan
, qtype
);
574 spin_lock_set(&qp
->qh_lock
);
575 for (qrp
= qp
->qh_hlist
; qrp
!= NULL
; qrp
= qrp
->qr_next
)
576 if (qrp
->qr_wchan
== wchan
)
578 if (qrp
== NULL
&& qp
->qh_def_root
.qr_head
== NULL
) {
579 /* the default queue root is available; use it */
580 qrp
= &qp
->qh_def_root
;
581 qrp
->qr_wchan
= wchan
;
582 ASSERT(qrp
->qr_next
== NULL
);
583 ASSERT(qrp
->qr_tail
== NULL
&&
584 qrp
->qr_rtcount
== 0 && qrp
->qr_qlen
== 0);
586 qp
->qh_wchan
= wchan
; /* valid until queue_unlock() is called */
587 qp
->qh_root
= qrp
; /* valid until queue_unlock() is called */
588 INCR32(qp
->qh_lockcount
);
594 * Release a queue head.
597 queue_unlock(queue_head_t
*qp
)
600 spin_lock_clear(&qp
->qh_lock
);
604 * For rwlock queueing, we must queue writers ahead of readers of the
605 * same priority. We do this by making writers appear to have a half
606 * point higher priority for purposes of priority comparisons below.
608 #define CMP_PRIO(ulwp) ((real_priority(ulwp) << 1) + (ulwp)->ul_writer)
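/*
 * Illustrative arithmetic (hypothetical values, not part of the original
 * source): with CMP_PRIO(), a writer and a reader that share real priority
 * 10 compare as 21 vs 20, so the writer sorts ahead; a reader at priority
 * 11 (22) still beats the writer at priority 10.
 */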
611 enqueue(queue_head_t
*qp
, ulwp_t
*ulwp
, int force_fifo
)
616 int pri
= CMP_PRIO(ulwp
);
618 ASSERT(MUTEX_OWNED(&qp
->qh_lock
, curthread
));
619 ASSERT(ulwp
->ul_sleepq
!= qp
);
621 if ((qrp
= qp
->qh_root
) == NULL
) {
622 /* use the thread's queue root for the linkage */
623 qrp
= &ulwp
->ul_queue_root
;
624 qrp
->qr_next
= qp
->qh_hlist
;
628 qrp
->qr_wchan
= qp
->qh_wchan
;
632 qp
->qh_hlist
->qr_prev
= qrp
;
635 MAXINCR(qp
->qh_hmax
, qp
->qh_hlen
);
639 * LIFO queue ordering is unfair and can lead to starvation,
640 * but it gives better performance for heavily contended locks.
641 * We use thread_queue_fifo (range is 0..8) to determine
642 * the frequency of FIFO vs LIFO queuing:
643 * 0 : every 256th time (almost always LIFO)
644 * 1 : every 128th time
645 * 2 : every 64th time
646 * 3 : every 32nd time
647 * 4 : every 16th time (the default value, mostly LIFO)
651 * 8 : every time (never LIFO, always FIFO)
652 * Note that there is always some degree of FIFO ordering.
 * This breaks livelock conditions that occur in applications
654 * that are written assuming (incorrectly) that threads acquire
655 * locks fairly, that is, in roughly round-robin order.
656 * In any event, the queue is maintained in kernel priority order.
658 * If force_fifo is non-zero, fifo queueing is forced.
659 * SUSV3 requires this for semaphores.
661 if (qrp
->qr_head
== NULL
) {
663 * The queue is empty. LIFO/FIFO doesn't matter.
665 ASSERT(qrp
->qr_tail
== NULL
);
666 ulwpp
= &qrp
->qr_head
;
667 } else if (force_fifo
|
668 (((++qp
->qh_qcnt
<< curthread
->ul_queue_fifo
) & 0xff) == 0)) {
670 * Enqueue after the last thread whose priority is greater
671 * than or equal to the priority of the thread being queued.
672 * Attempt first to go directly onto the tail of the queue.
674 if (pri
<= CMP_PRIO(qrp
->qr_tail
))
675 ulwpp
= &qrp
->qr_tail
->ul_link
;
677 for (ulwpp
= &qrp
->qr_head
; (next
= *ulwpp
) != NULL
;
678 ulwpp
= &next
->ul_link
)
679 if (pri
> CMP_PRIO(next
))
684 * Enqueue before the first thread whose priority is less
685 * than or equal to the priority of the thread being queued.
686 * Hopefully we can go directly onto the head of the queue.
688 for (ulwpp
= &qrp
->qr_head
; (next
= *ulwpp
) != NULL
;
689 ulwpp
= &next
->ul_link
)
690 if (pri
>= CMP_PRIO(next
))
693 if ((ulwp
->ul_link
= *ulwpp
) == NULL
)
697 ulwp
->ul_sleepq
= qp
;
698 ulwp
->ul_wchan
= qp
->qh_wchan
;
699 ulwp
->ul_qtype
= qp
->qh_type
;
700 if ((ulwp
->ul_schedctl
!= NULL
&&
701 ulwp
->ul_schedctl
->sc_cid
== ulwp
->ul_rtclassid
) |
703 ulwp
->ul_rtqueued
= 1;
706 MAXINCR(qrp
->qr_qmax
, qrp
->qr_qlen
);
707 MAXINCR(qp
->qh_qmax
, qp
->qh_qlen
);
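
/*
 * Illustrative arithmetic for the FIFO/LIFO mix above (not part of the
 * original source): with the default ul_queue_fifo of 4, the test
 * (((++qcnt << 4) & 0xff) == 0) is true once every 16 enqueues, so one
 * enqueue in sixteen is ordered FIFO and the rest are LIFO.  A setting of 8
 * shifts the count out of the low byte entirely, making every enqueue FIFO.
 */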
711 * Helper function for queue_slot() and queue_slot_rt().
712 * Try to find a non-suspended thread on the queue.
715 queue_slot_runnable(ulwp_t
**ulwpp
, ulwp_t
**prevp
, int rt
)
718 ulwp_t
**foundpp
= NULL
;
724 (ulwp
= *ulwpp
) != NULL
;
725 prev
= ulwp
, ulwpp
= &ulwp
->ul_link
) {
726 if (ulwp
->ul_stop
) /* skip suspended threads */
728 tpri
= rt
? CMP_PRIO(ulwp
) : 0;
729 if (tpri
> priority
) {
741 * For real-time, we search the entire queue because the dispatch
742 * (kernel) priorities may have changed since enqueueing.
745 queue_slot_rt(ulwp_t
**ulwpp_org
, ulwp_t
**prevp
)
747 ulwp_t
**ulwpp
= ulwpp_org
;
748 ulwp_t
*ulwp
= *ulwpp
;
749 ulwp_t
**foundpp
= ulwpp
;
750 int priority
= CMP_PRIO(ulwp
);
754 for (prev
= ulwp
, ulwpp
= &ulwp
->ul_link
;
755 (ulwp
= *ulwpp
) != NULL
;
756 prev
= ulwp
, ulwpp
= &ulwp
->ul_link
) {
757 tpri
= CMP_PRIO(ulwp
);
758 if (tpri
> priority
) {
767 * Try not to return a suspended thread.
768 * This mimics the old libthread's behavior.
771 (ulwpp
= queue_slot_runnable(ulwpp_org
, prevp
, 1)) != NULL
) {
780 queue_slot(queue_head_t
*qp
, ulwp_t
**prevp
, int *more
)
787 ASSERT(MUTEX_OWNED(&qp
->qh_lock
, curthread
));
789 if ((qrp
= qp
->qh_root
) == NULL
|| (ulwp
= qrp
->qr_head
) == NULL
) {
791 return (NULL
); /* no lwps on the queue */
793 rt
= (qrp
->qr_rtcount
!= 0);
795 if (ulwp
->ul_link
== NULL
) { /* only one lwp on the queue */
798 return (&qrp
->qr_head
);
802 if (rt
) /* real-time queue */
803 return (queue_slot_rt(&qrp
->qr_head
, prevp
));
805 * Try not to return a suspended thread.
806 * This mimics the old libthread's behavior.
809 (ulwpp
= queue_slot_runnable(&qrp
->qr_head
, prevp
, 0)) != NULL
) {
815 * The common case; just pick the first thread on the queue.
818 return (&qrp
->qr_head
);
822 * Common code for unlinking an lwp from a user-level sleep queue.
825 queue_unlink(queue_head_t
*qp
, ulwp_t
**ulwpp
, ulwp_t
*prev
)
827 queue_root_t
*qrp
= qp
->qh_root
;
829 ulwp_t
*ulwp
= *ulwpp
;
832 ASSERT(MUTEX_OWNED(&qp
->qh_lock
, curthread
));
833 ASSERT(qp
->qh_wchan
!= NULL
&& ulwp
->ul_wchan
== qp
->qh_wchan
);
837 if (ulwp
->ul_rtqueued
) {
838 ulwp
->ul_rtqueued
= 0;
841 next
= ulwp
->ul_link
;
843 ulwp
->ul_link
= NULL
;
844 if (qrp
->qr_tail
== ulwp
)
846 if (qrp
== &ulwp
->ul_queue_root
) {
848 * We can't continue to use the unlinked thread's
849 * queue root for the linkage.
851 queue_root_t
*qr_next
= qrp
->qr_next
;
852 queue_root_t
*qr_prev
= qrp
->qr_prev
;
855 /* switch to using the last thread's queue root */
856 ASSERT(qrp
->qr_qlen
!= 0);
857 nqrp
= &qrp
->qr_tail
->ul_queue_root
;
860 qr_next
->qr_prev
= nqrp
;
862 qr_prev
->qr_next
= nqrp
;
867 /* empty queue root; just delete from the hash list */
868 ASSERT(qrp
->qr_qlen
== 0);
870 qr_next
->qr_prev
= qr_prev
;
872 qr_prev
->qr_next
= qr_next
;
874 qp
->qh_hlist
= qr_next
;
882 dequeue(queue_head_t
*qp
, int *more
)
888 if ((ulwpp
= queue_slot(qp
, &prev
, more
)) == NULL
)
891 queue_unlink(qp
, ulwpp
, prev
);
892 ulwp
->ul_sleepq
= NULL
;
893 ulwp
->ul_wchan
= NULL
;
898 * Return a pointer to the highest priority thread sleeping on wchan.
901 queue_waiter(queue_head_t
*qp
)
907 if ((ulwpp
= queue_slot(qp
, &prev
, &more
)) == NULL
)
913 dequeue_self(queue_head_t
*qp
)
915 ulwp_t
*self
= curthread
;
922 ASSERT(MUTEX_OWNED(&qp
->qh_lock
, self
));
924 /* find self on the sleep queue */
925 if ((qrp
= qp
->qh_root
) != NULL
) {
926 for (prev
= NULL
, ulwpp
= &qrp
->qr_head
;
927 (ulwp
= *ulwpp
) != NULL
;
928 prev
= ulwp
, ulwpp
= &ulwp
->ul_link
) {
930 queue_unlink(qp
, ulwpp
, prev
);
931 self
->ul_cvmutex
= NULL
;
932 self
->ul_sleepq
= NULL
;
933 self
->ul_wchan
= NULL
;
941 thr_panic("dequeue_self(): curthread not found on queue");
943 return ((qrp
= qp
->qh_root
) != NULL
&& qrp
->qr_head
!= NULL
);
947 * Called from call_user_handler() and _thrp_suspend() to take
948 * ourself off of our sleep queue so we can grab locks.
953 ulwp_t
*self
= curthread
;
957 * Calling enter_critical()/exit_critical() here would lead
958 * to recursion. Just manipulate self->ul_critical directly.
961 while (self
->ul_sleepq
!= NULL
) {
962 qp
= queue_lock(self
->ul_wchan
, self
->ul_qtype
);
964 * We may have been moved from a CV queue to a
965 * mutex queue while we were attempting queue_lock().
966 * If so, just loop around and try again.
967 * dequeue_self() clears self->ul_sleepq.
969 if (qp
== self
->ul_sleepq
)
970 (void) dequeue_self(qp
);
 * Common code for calling the ___lwp_mutex_timedlock() system call.
979 * Returns with mutex_owner and mutex_ownerpid set correctly.
982 mutex_lock_kernel(mutex_t
*mp
, timespec_t
*tsp
, tdb_mutex_stats_t
*msp
)
984 ulwp_t
*self
= curthread
;
985 uberdata_t
*udp
= self
->ul_uberdata
;
986 int mtype
= mp
->mutex_type
;
987 hrtime_t begin_sleep
;
991 self
->ul_sp
= stkptr();
993 if (__td_event_report(self
, TD_SLEEP
, udp
)) {
994 self
->ul_td_evbuf
.eventnum
= TD_SLEEP
;
995 self
->ul_td_evbuf
.eventdata
= mp
;
996 tdb_event(TD_SLEEP
, udp
);
999 tdb_incr(msp
->mutex_sleep
);
1000 begin_sleep
= gethrtime();
1003 DTRACE_PROBE1(plockstat
, mutex__block
, mp
);
1007 * A return value of EOWNERDEAD or ELOCKUNMAPPED
1008 * means we successfully acquired the lock.
1010 if ((error
= ___lwp_mutex_timedlock(mp
, tsp
, self
)) != 0 &&
1011 error
!= EOWNERDEAD
&& error
!= ELOCKUNMAPPED
) {
1016 if (mtype
& USYNC_PROCESS
) {
1018 * Defend against forkall(). We may be the child,
1019 * in which case we don't actually own the mutex.
1021 enter_critical(self
);
1022 if (mp
->mutex_ownerpid
== udp
->pid
) {
1023 exit_critical(self
);
1027 exit_critical(self
);
1035 msp
->mutex_sleep_time
+= gethrtime() - begin_sleep
;
1036 self
->ul_wchan
= NULL
;
1040 ASSERT(mp
->mutex_owner
== (uintptr_t)self
);
1041 DTRACE_PROBE2(plockstat
, mutex__blocked
, mp
, 1);
1042 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, 0);
1044 DTRACE_PROBE2(plockstat
, mutex__blocked
, mp
, 0);
1045 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, error
);
1052 * Common code for calling the ___lwp_mutex_trylock() system call.
1053 * Returns with mutex_owner and mutex_ownerpid set correctly.
1056 mutex_trylock_kernel(mutex_t
*mp
)
1058 ulwp_t
*self
= curthread
;
1059 uberdata_t
*udp
= self
->ul_uberdata
;
1060 int mtype
= mp
->mutex_type
;
1066 * A return value of EOWNERDEAD or ELOCKUNMAPPED
1067 * means we successfully acquired the lock.
1069 if ((error
= ___lwp_mutex_trylock(mp
, self
)) != 0 &&
1070 error
!= EOWNERDEAD
&& error
!= ELOCKUNMAPPED
) {
1075 if (mtype
& USYNC_PROCESS
) {
1077 * Defend against forkall(). We may be the child,
1078 * in which case we don't actually own the mutex.
1080 enter_critical(self
);
1081 if (mp
->mutex_ownerpid
== udp
->pid
) {
1082 exit_critical(self
);
1086 exit_critical(self
);
1094 ASSERT(mp
->mutex_owner
== (uintptr_t)self
);
1095 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, 0);
1096 } else if (error
!= EBUSY
) {
1097 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, error
);
1103 volatile sc_shared_t
*
1104 setup_schedctl(void)
1106 ulwp_t
*self
= curthread
;
1107 volatile sc_shared_t
*scp
;
1110 if ((scp
= self
->ul_schedctl
) == NULL
&& /* no shared state yet */
1111 !self
->ul_vfork
&& /* not a child of vfork() */
1112 !self
->ul_schedctl_called
) { /* haven't been called before */
1113 enter_critical(self
);
1114 self
->ul_schedctl_called
= &self
->ul_uberdata
->uberflags
;
1115 if ((tmp
= __schedctl()) != (sc_shared_t
*)(-1))
1116 self
->ul_schedctl
= scp
= tmp
;
1117 exit_critical(self
);
1120 * Unless the call to setup_schedctl() is surrounded
1121 * by enter_critical()/exit_critical(), the address
1122 * we are returning could be invalid due to a forkall()
1123 * having occurred in another thread.
1129 * Interfaces from libsched, incorporated into libc.
1130 * libsched.so.1 is now a filter library onto libc.
1132 #pragma weak schedctl_lookup = schedctl_init
1136 volatile sc_shared_t
*scp
= setup_schedctl();
1137 return ((scp
== NULL
)? NULL
: (schedctl_t
*)&scp
->sc_preemptctl
);
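
/*
 * Illustrative usage sketch (not part of the original source, compiled out):
 * the public schedctl(3C) interfaces that sit on top of setup_schedctl()
 * above.  schedctl_start()/schedctl_stop() are the usual companions to
 * schedctl_init().
 */
#if 0
#include <schedctl.h>

static void
example_schedctl_usage(void)
{
	schedctl_t *scp = schedctl_init();	/* NULL if unavailable */

	if (scp != NULL) {
		schedctl_start(scp);	/* hint: please do not preempt me */
		/* ... short critical section ... */
		schedctl_stop(scp);	/* end of hint; yield if asked to */
	}
}
#endif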
1146 * Contract private interface for java.
1147 * Set up the schedctl data if it doesn't exist yet.
1148 * Return a pointer to the pointer to the schedctl data.
1150 volatile sc_shared_t
*volatile *
1153 ulwp_t
*self
= curthread
;
1154 volatile sc_shared_t
*volatile *ptr
;
1158 if (*(ptr
= &self
->ul_schedctl
) == NULL
)
1159 (void) setup_schedctl();
1164 * Block signals and attempt to block preemption.
1165 * no_preempt()/preempt() must be used in pairs but can be nested.
1168 no_preempt(ulwp_t
*self
)
1170 volatile sc_shared_t
*scp
;
1172 if (self
->ul_preempt
++ == 0) {
1173 enter_critical(self
);
1174 if ((scp
= self
->ul_schedctl
) != NULL
||
1175 (scp
= setup_schedctl()) != NULL
) {
1177 * Save the pre-existing preempt value.
1179 self
->ul_savpreempt
= scp
->sc_preemptctl
.sc_nopreempt
;
1180 scp
->sc_preemptctl
.sc_nopreempt
= 1;
1186 * Undo the effects of no_preempt().
1189 preempt(ulwp_t
*self
)
1191 volatile sc_shared_t
*scp
;
1193 ASSERT(self
->ul_preempt
> 0);
1194 if (--self
->ul_preempt
== 0) {
1195 if ((scp
= self
->ul_schedctl
) != NULL
) {
1197 * Restore the pre-existing preempt value.
1199 scp
->sc_preemptctl
.sc_nopreempt
= self
->ul_savpreempt
;
1200 if (scp
->sc_preemptctl
.sc_yield
&&
1201 scp
->sc_preemptctl
.sc_nopreempt
== 0) {
1203 if (scp
->sc_preemptctl
.sc_yield
) {
1205 * Shouldn't happen. This is either
1206 * a race condition or the thread
1207 * just entered the real-time class.
1210 scp
->sc_preemptctl
.sc_yield
= 0;
1214 exit_critical(self
);
1219 * If a call to preempt() would cause the current thread to yield or to
1220 * take deferred actions in exit_critical(), then unpark the specified
1221 * lwp so it can run while we delay. Return the original lwpid if the
1222 * unpark was not performed, else return zero. The tests are a repeat
1223 * of some of the tests in preempt(), above. This is a statistical
1224 * optimization solely for cond_sleep_queue(), below.
1227 preempt_unpark(ulwp_t
*self
, lwpid_t lwpid
)
1229 volatile sc_shared_t
*scp
= self
->ul_schedctl
;
1231 ASSERT(self
->ul_preempt
== 1 && self
->ul_critical
> 0);
1232 if ((scp
!= NULL
&& scp
->sc_preemptctl
.sc_yield
) ||
1233 (self
->ul_curplease
&& self
->ul_critical
== 1)) {
1234 (void) __lwp_unpark(lwpid
);
1241 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
1242 * If this fails, return EBUSY and let the caller deal with it.
1243 * If this succeeds, return 0 with mutex_owner set to curthread.
1246 mutex_trylock_adaptive(mutex_t
*mp
, int tryhard
)
1248 ulwp_t
*self
= curthread
;
1251 volatile sc_shared_t
*scp
;
1252 volatile uint8_t *lockp
= (volatile uint8_t *)&mp
->mutex_lockw
;
1253 volatile uint64_t *ownerp
= (volatile uint64_t *)&mp
->mutex_owner
;
1254 uint32_t new_lockword
;
1257 uint8_t max_spinners
;
1259 ASSERT(!(mp
->mutex_type
& USYNC_PROCESS
));
1261 if (MUTEX_OWNED(mp
, self
))
1264 enter_critical(self
);
1266 /* short-cut, not definitive (see below) */
1267 if (mp
->mutex_flag
& LOCK_NOTRECOVERABLE
) {
1268 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1269 error
= ENOTRECOVERABLE
;
1274 * Make one attempt to acquire the lock before
1275 * incurring the overhead of the spin loop.
1277 if (set_lock_byte(lockp
) == 0) {
1278 *ownerp
= (uintptr_t)self
;
1285 ncpus
= (int)_sysconf(_SC_NPROCESSORS_ONLN
);
1286 if ((max_spinners
= self
->ul_max_spinners
) >= ncpus
)
1287 max_spinners
= ncpus
- 1;
1288 max_count
= (max_spinners
!= 0)? self
->ul_adaptive_spin
: 0;
1293 * This spin loop is unfair to lwps that have already dropped into
1294 * the kernel to sleep. They will starve on a highly-contended mutex.
1295 * This is just too bad. The adaptive spin algorithm is intended
1296 * to allow programs with highly-contended locks (that is, broken
1297 * programs) to execute with reasonable speed despite their contention.
1298 * Being fair would reduce the speed of such programs and well-written
1299 * programs will not suffer in any case.
1301 if (spinners_incr(&mp
->mutex_lockword
, max_spinners
) == -1)
1303 DTRACE_PROBE1(plockstat
, mutex__spin
, mp
);
1304 for (count
= 1; ; count
++) {
1305 if (*lockp
== 0 && set_lock_byte(lockp
) == 0) {
1306 *ownerp
= (uintptr_t)self
;
1310 if (count
== max_count
)
1314 * Stop spinning if the mutex owner is not running on
1315 * a processor; it will not drop the lock any time soon
1316 * and we would just be wasting time to keep spinning.
1318 * Note that we are looking at another thread (ulwp_t)
1319 * without ensuring that the other thread does not exit.
1320 * The scheme relies on ulwp_t structures never being
1321 * deallocated by the library (the library employs a free
1322 * list of ulwp_t structs that are reused when new threads
1323 * are created) and on schedctl shared memory never being
1324 * deallocated once created via __schedctl().
1326 * Thus, the worst that can happen when the spinning thread
1327 * looks at the owner's schedctl data is that it is looking
1328 * at some other thread's schedctl data. This almost never
1329 * happens and is benign when it does.
1331 if ((ulwp
= (ulwp_t
*)(uintptr_t)*ownerp
) != NULL
&&
1332 ((scp
= ulwp
->ul_schedctl
) == NULL
||
1333 scp
->sc_state
!= SC_ONPROC
))
1336 new_lockword
= spinners_decr(&mp
->mutex_lockword
);
1337 if (error
&& (new_lockword
& (LOCKMASK
| SPINNERMASK
)) == 0) {
1339 * We haven't yet acquired the lock, the lock
1340 * is free, and there are no other spinners.
1341 * Make one final attempt to acquire the lock.
1343 * This isn't strictly necessary since mutex_lock_queue()
1344 * (the next action this thread will take if it doesn't
1345 * acquire the lock here) makes one attempt to acquire
1346 * the lock before putting the thread to sleep.
1348 * If the next action for this thread (on failure here)
1349 * were not to call mutex_lock_queue(), this would be
1350 * necessary for correctness, to avoid ending up with an
1351 * unheld mutex with waiters but no one to wake them up.
1353 if (set_lock_byte(lockp
) == 0) {
1354 *ownerp
= (uintptr_t)self
;
1361 if (error
== 0 && (mp
->mutex_flag
& LOCK_NOTRECOVERABLE
)) {
1362 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1364 * We shouldn't own the mutex.
 * Just clear the lock; everyone has already been woken up.
1368 (void) clear_lockbyte(&mp
->mutex_lockword
);
1369 error
= ENOTRECOVERABLE
;
1372 exit_critical(self
);
1376 DTRACE_PROBE3(plockstat
, mutex__spun
, mp
, 0, count
);
1378 if (error
!= EBUSY
) {
1379 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, error
);
1383 DTRACE_PROBE3(plockstat
, mutex__spun
, mp
, 1, count
);
1385 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, count
);
1386 if (mp
->mutex_flag
& LOCK_OWNERDEAD
) {
1387 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1396 * Same as mutex_trylock_adaptive(), except specifically for queue locks.
1397 * The owner field is not set here; the caller (spin_lock_set()) sets it.
1400 mutex_queuelock_adaptive(mutex_t
*mp
)
1403 volatile sc_shared_t
*scp
;
1404 volatile uint8_t *lockp
;
1405 volatile uint64_t *ownerp
;
1406 int count
= curthread
->ul_queue_spin
;
1408 ASSERT(mp
->mutex_type
== USYNC_THREAD
);
1413 lockp
= (volatile uint8_t *)&mp
->mutex_lockw
;
1414 ownerp
= (volatile uint64_t *)&mp
->mutex_owner
;
1415 while (--count
>= 0) {
1416 if (*lockp
== 0 && set_lock_byte(lockp
) == 0)
1419 if ((ulwp
= (ulwp_t
*)(uintptr_t)*ownerp
) != NULL
&&
1420 ((scp
= ulwp
->ul_schedctl
) == NULL
||
1421 scp
->sc_state
!= SC_ONPROC
))
1429 * Like mutex_trylock_adaptive(), but for process-shared mutexes.
1430 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
1431 * If this fails, return EBUSY and let the caller deal with it.
1432 * If this succeeds, return 0 with mutex_owner set to curthread
1433 * and mutex_ownerpid set to the current pid.
1436 mutex_trylock_process(mutex_t
*mp
, int tryhard
)
1438 ulwp_t
*self
= curthread
;
1439 uberdata_t
*udp
= self
->ul_uberdata
;
1441 volatile uint64_t *lockp
= (volatile uint64_t *)&mp
->mutex_lockword64
;
1442 uint32_t new_lockword
;
1445 uint8_t max_spinners
;
1447 #if defined(__sparc) && !defined(_LP64)
1448 /* horrible hack, necessary only on 32-bit sparc */
1449 int fix_alignment_problem
=
1450 (((uintptr_t)mp
& (_LONG_LONG_ALIGNMENT
- 1)) &&
1451 self
->ul_misaligned
&& !(mp
->mutex_type
& LOCK_ROBUST
));
1454 ASSERT(mp
->mutex_type
& USYNC_PROCESS
);
1456 if (shared_mutex_held(mp
))
1459 enter_critical(self
);
1461 /* short-cut, not definitive (see below) */
1462 if (mp
->mutex_flag
& LOCK_NOTRECOVERABLE
) {
1463 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1464 error
= ENOTRECOVERABLE
;
1469 * Make one attempt to acquire the lock before
1470 * incurring the overhead of the spin loop.
1472 #if defined(__sparc) && !defined(_LP64)
1473 /* horrible hack, necessary only on 32-bit sparc */
1474 if (fix_alignment_problem
) {
1475 if (set_lock_byte(&mp
->mutex_lockw
) == 0) {
1476 mp
->mutex_ownerpid
= udp
->pid
;
1477 mp
->mutex_owner
= (uintptr_t)self
;
1483 if (set_lock_byte64(lockp
, udp
->pid
) == 0) {
1484 mp
->mutex_owner
= (uintptr_t)self
;
1485 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1492 ncpus
= (int)_sysconf(_SC_NPROCESSORS_ONLN
);
1493 if ((max_spinners
= self
->ul_max_spinners
) >= ncpus
)
1494 max_spinners
= ncpus
- 1;
1495 max_count
= (max_spinners
!= 0)? self
->ul_adaptive_spin
: 0;
1500 * This is a process-shared mutex.
1501 * We cannot know if the owner is running on a processor.
1502 * We just spin and hope that it is on a processor.
1504 if (spinners_incr(&mp
->mutex_lockword
, max_spinners
) == -1)
1506 DTRACE_PROBE1(plockstat
, mutex__spin
, mp
);
1507 for (count
= 1; ; count
++) {
1508 #if defined(__sparc) && !defined(_LP64)
1509 /* horrible hack, necessary only on 32-bit sparc */
1510 if (fix_alignment_problem
) {
1511 if ((*lockp
& LOCKMASK64
) == 0 &&
1512 set_lock_byte(&mp
->mutex_lockw
) == 0) {
1513 mp
->mutex_ownerpid
= udp
->pid
;
1514 mp
->mutex_owner
= (uintptr_t)self
;
1520 if ((*lockp
& LOCKMASK64
) == 0 &&
1521 set_lock_byte64(lockp
, udp
->pid
) == 0) {
1522 mp
->mutex_owner
= (uintptr_t)self
;
1523 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1527 if (count
== max_count
)
1531 new_lockword
= spinners_decr(&mp
->mutex_lockword
);
1532 if (error
&& (new_lockword
& (LOCKMASK
| SPINNERMASK
)) == 0) {
1534 * We haven't yet acquired the lock, the lock
1535 * is free, and there are no other spinners.
1536 * Make one final attempt to acquire the lock.
1538 * This isn't strictly necessary since mutex_lock_kernel()
1539 * (the next action this thread will take if it doesn't
1540 * acquire the lock here) makes one attempt to acquire
1541 * the lock before putting the thread to sleep.
1543 * If the next action for this thread (on failure here)
1544 * were not to call mutex_lock_kernel(), this would be
1545 * necessary for correctness, to avoid ending up with an
1546 * unheld mutex with waiters but no one to wake them up.
1548 #if defined(__sparc) && !defined(_LP64)
1549 /* horrible hack, necessary only on 32-bit sparc */
1550 if (fix_alignment_problem
) {
1551 if (set_lock_byte(&mp
->mutex_lockw
) == 0) {
1552 mp
->mutex_ownerpid
= udp
->pid
;
1553 mp
->mutex_owner
= (uintptr_t)self
;
1558 if (set_lock_byte64(lockp
, udp
->pid
) == 0) {
1559 mp
->mutex_owner
= (uintptr_t)self
;
1560 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1567 if (error
== 0 && (mp
->mutex_flag
& LOCK_NOTRECOVERABLE
)) {
1568 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1570 * We shouldn't own the mutex.
 * Just clear the lock; everyone has already been woken up.
1573 mp
->mutex_owner
= 0;
1574 /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
1575 (void) clear_lockbyte64(&mp
->mutex_lockword64
);
1576 error
= ENOTRECOVERABLE
;
1579 exit_critical(self
);
1583 DTRACE_PROBE3(plockstat
, mutex__spun
, mp
, 0, count
);
1585 if (error
!= EBUSY
) {
1586 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, error
);
1590 DTRACE_PROBE3(plockstat
, mutex__spun
, mp
, 1, count
);
1592 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, count
);
1593 if (mp
->mutex_flag
& (LOCK_OWNERDEAD
| LOCK_UNMAPPED
)) {
1594 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1595 if (mp
->mutex_flag
& LOCK_OWNERDEAD
)
1597 else if (mp
->mutex_type
& USYNC_PROCESS_ROBUST
)
1598 error
= ELOCKUNMAPPED
;
1608 * Mutex wakeup code for releasing a USYNC_THREAD mutex.
1609 * Returns the lwpid of the thread that was dequeued, if any.
1610 * The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
1611 * to wake up the specified lwp.
1614 mutex_wakeup(mutex_t
*mp
)
1622 * Dequeue a waiter from the sleep queue. Don't touch the mutex
1623 * waiters bit if no one was found on the queue because the mutex
1624 * might have been deallocated or reallocated for another purpose.
1626 qp
= queue_lock(mp
, MX
);
1627 if ((ulwp
= dequeue(qp
, &more
)) != NULL
) {
1628 lwpid
= ulwp
->ul_lwpid
;
1629 mp
->mutex_waiters
= more
;
1636 * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex.
1639 mutex_wakeup_all(mutex_t
*mp
)
1644 int maxlwps
= MAXLWPS
;
1646 lwpid_t buffer
[MAXLWPS
];
1647 lwpid_t
*lwpid
= buffer
;
1650 * Walk the list of waiters and prepare to wake up all of them.
1651 * The waiters flag has already been cleared from the mutex.
1653 * We keep track of lwpids that are to be unparked in lwpid[].
1654 * __lwp_unpark_all() is called to unpark all of them after
1655 * they have been removed from the sleep queue and the sleep
1656 * queue lock has been dropped. If we run out of space in our
1657 * on-stack buffer, we need to allocate more but we can't call
1658 * lmalloc() because we are holding a queue lock when the overflow
1659 * occurs and lmalloc() acquires a lock. We can't use alloca()
1660 * either because the application may have allocated a small
1661 * stack and we don't want to overrun the stack. So we call
1662 * alloc_lwpids() to allocate a bigger buffer using the mmap()
1663 * system call directly since that path acquires no locks.
1665 qp
= queue_lock(mp
, MX
);
1667 if ((qrp
= qp
->qh_root
) == NULL
||
1668 (ulwp
= qrp
->qr_head
) == NULL
)
1670 ASSERT(ulwp
->ul_wchan
== mp
);
1671 queue_unlink(qp
, &qrp
->qr_head
, NULL
);
1672 ulwp
->ul_sleepq
= NULL
;
1673 ulwp
->ul_wchan
= NULL
;
1674 if (nlwpid
== maxlwps
)
1675 lwpid
= alloc_lwpids(lwpid
, &nlwpid
, &maxlwps
);
1676 lwpid
[nlwpid
++] = ulwp
->ul_lwpid
;
1682 mp
->mutex_waiters
= 0;
1683 no_preempt(curthread
);
1686 (void) __lwp_unpark(lwpid
[0]);
1688 (void) __lwp_unpark_all(lwpid
, nlwpid
);
1692 if (lwpid
!= buffer
)
1693 (void) munmap((caddr_t
)lwpid
, maxlwps
* sizeof (lwpid_t
));
1697 * Release a process-private mutex.
1698 * As an optimization, if there are waiters but there are also spinners
1699 * attempting to acquire the mutex, then don't bother waking up a waiter;
1700 * one of the spinners will acquire the mutex soon and it would be a waste
1701 * of resources to wake up some thread just to have it spin for a while
1702 * and then possibly go back to sleep. See mutex_trylock_adaptive().
1705 mutex_unlock_queue(mutex_t
*mp
, int release_all
)
1707 ulwp_t
*self
= curthread
;
1709 uint32_t old_lockword
;
1711 DTRACE_PROBE2(plockstat
, mutex__release
, mp
, 0);
1713 mp
->mutex_owner
= 0;
1714 old_lockword
= clear_lockbyte(&mp
->mutex_lockword
);
1715 if ((old_lockword
& WAITERMASK
) &&
1716 (release_all
|| (old_lockword
& SPINNERMASK
) == 0)) {
1717 no_preempt(self
); /* ensure a prompt wakeup */
1719 mutex_wakeup_all(mp
);
1721 lwpid
= mutex_wakeup(mp
);
1730 * Like mutex_unlock_queue(), but for process-shared mutexes.
1733 mutex_unlock_process(mutex_t
*mp
, int release_all
)
1735 ulwp_t
*self
= curthread
;
1736 uint64_t old_lockword64
;
1738 DTRACE_PROBE2(plockstat
, mutex__release
, mp
, 0);
1740 mp
->mutex_owner
= 0;
1741 #if defined(__sparc) && !defined(_LP64)
1742 /* horrible hack, necessary only on 32-bit sparc */
1743 if (((uintptr_t)mp
& (_LONG_LONG_ALIGNMENT
- 1)) &&
1744 self
->ul_misaligned
&& !(mp
->mutex_type
& LOCK_ROBUST
)) {
1745 uint32_t old_lockword
;
1746 mp
->mutex_ownerpid
= 0;
1747 old_lockword
= clear_lockbyte(&mp
->mutex_lockword
);
1748 if ((old_lockword
& WAITERMASK
) &&
1749 (release_all
|| (old_lockword
& SPINNERMASK
) == 0)) {
1750 no_preempt(self
); /* ensure a prompt wakeup */
1751 (void) ___lwp_mutex_wakeup(mp
, release_all
);
1758 /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
1759 old_lockword64
= clear_lockbyte64(&mp
->mutex_lockword64
);
1760 if ((old_lockword64
& WAITERMASK64
) &&
1761 (release_all
|| (old_lockword64
& SPINNERMASK64
) == 0)) {
1762 no_preempt(self
); /* ensure a prompt wakeup */
1763 (void) ___lwp_mutex_wakeup(mp
, release_all
);
1773 (void) mutex_lock_kernel(&stall_mutex
, NULL
, NULL
);
1777 * Acquire a USYNC_THREAD mutex via user-level sleep queues.
1778 * We failed set_lock_byte(&mp->mutex_lockw) before coming here.
1779 * If successful, returns with mutex_owner set correctly.
1782 mutex_lock_queue(ulwp_t
*self
, tdb_mutex_stats_t
*msp
, mutex_t
*mp
,
1785 uberdata_t
*udp
= curthread
->ul_uberdata
;
1787 hrtime_t begin_sleep
;
1790 self
->ul_sp
= stkptr();
1791 if (__td_event_report(self
, TD_SLEEP
, udp
)) {
1792 self
->ul_wchan
= mp
;
1793 self
->ul_td_evbuf
.eventnum
= TD_SLEEP
;
1794 self
->ul_td_evbuf
.eventdata
= mp
;
1795 tdb_event(TD_SLEEP
, udp
);
1798 tdb_incr(msp
->mutex_sleep
);
1799 begin_sleep
= gethrtime();
1802 DTRACE_PROBE1(plockstat
, mutex__block
, mp
);
1805 * Put ourself on the sleep queue, and while we are
1806 * unable to grab the lock, go park in the kernel.
1807 * Take ourself off the sleep queue after we acquire the lock.
1808 * The waiter bit can be set/cleared only while holding the queue lock.
1810 qp
= queue_lock(mp
, MX
);
1811 enqueue(qp
, self
, 0);
1812 mp
->mutex_waiters
= 1;
1814 if (set_lock_byte(&mp
->mutex_lockw
) == 0) {
1815 mp
->mutex_owner
= (uintptr_t)self
;
1816 mp
->mutex_waiters
= dequeue_self(qp
);
1819 set_parking_flag(self
, 1);
1822 * __lwp_park() will return the residual time in tsp
1823 * if we are unparked before the timeout expires.
1825 error
= __lwp_park(tsp
, 0);
1826 set_parking_flag(self
, 0);
1828 * We could have taken a signal or suspended ourself.
1829 * If we did, then we removed ourself from the queue.
1830 * Someone else may have removed us from the queue
1831 * as a consequence of mutex_unlock(). We may have
1832 * gotten a timeout from __lwp_park(). Or we may still
1833 * be on the queue and this is just a spurious wakeup.
1835 qp
= queue_lock(mp
, MX
);
1836 if (self
->ul_sleepq
== NULL
) {
1838 mp
->mutex_waiters
= queue_waiter(qp
)? 1 : 0;
1843 if (set_lock_byte(&mp
->mutex_lockw
) == 0) {
1844 mp
->mutex_owner
= (uintptr_t)self
;
1847 enqueue(qp
, self
, 0);
1848 mp
->mutex_waiters
= 1;
1850 ASSERT(self
->ul_sleepq
== qp
&&
1851 self
->ul_qtype
== MX
&&
1852 self
->ul_wchan
== mp
);
1854 if (error
!= EINTR
) {
1855 mp
->mutex_waiters
= dequeue_self(qp
);
1861 ASSERT(self
->ul_sleepq
== NULL
&& self
->ul_link
== NULL
&&
1862 self
->ul_wchan
== NULL
);
1865 ASSERT(error
== 0 || error
== EINVAL
|| error
== ETIME
);
1867 if (error
== 0 && (mp
->mutex_flag
& LOCK_NOTRECOVERABLE
)) {
1868 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1870 * We shouldn't own the mutex.
 * Just clear the lock; everyone has already been woken up.
1873 mp
->mutex_owner
= 0;
1874 (void) clear_lockbyte(&mp
->mutex_lockword
);
1875 error
= ENOTRECOVERABLE
;
1881 msp
->mutex_sleep_time
+= gethrtime() - begin_sleep
;
1884 DTRACE_PROBE2(plockstat
, mutex__blocked
, mp
, 0);
1885 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, error
);
1887 DTRACE_PROBE2(plockstat
, mutex__blocked
, mp
, 1);
1888 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, 0);
1889 if (mp
->mutex_flag
& LOCK_OWNERDEAD
) {
1890 ASSERT(mp
->mutex_type
& LOCK_ROBUST
);
1899 mutex_recursion(mutex_t
*mp
, int mtype
, int try)
1901 ASSERT(mutex_held(mp
));
1902 ASSERT(mtype
& (LOCK_RECURSIVE
|LOCK_ERRORCHECK
));
1903 ASSERT(try == MUTEX_TRY
|| try == MUTEX_LOCK
);
1905 if (mtype
& LOCK_RECURSIVE
) {
1906 if (mp
->mutex_rcount
== RECURSION_MAX
) {
1907 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, EAGAIN
);
1911 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 1, 0);
1914 if (try == MUTEX_LOCK
) {
1915 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, EDEADLK
);
1922 * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so
1923 * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary.
1924 * We use tdb_hash_lock here and in the synch object tracking code in
1925 * the tdb_agent.c file. There is no conflict between these two usages.
1928 register_lock(mutex_t
*mp
)
1930 uberdata_t
*udp
= curthread
->ul_uberdata
;
1931 uint_t hash
= LOCK_HASH(mp
);
1937 if ((table
= udp
->robustlocks
) == NULL
) {
1938 lmutex_lock(&udp
->tdb_hash_lock
);
1939 if ((table
= udp
->robustlocks
) == NULL
) {
1940 table
= lmalloc(LOCKHASHSZ
* sizeof (robust_t
*));
1942 udp
->robustlocks
= table
;
1944 lmutex_unlock(&udp
->tdb_hash_lock
);
1949 * First search the registered table with no locks held.
1950 * This is safe because the table never shrinks
1951 * and we can only get a false negative.
1953 for (rlp
= table
[hash
]; rlp
!= NULL
; rlp
= rlp
->robust_next
) {
1954 if (rlp
->robust_lock
== mp
) /* already registered */
1959 * The lock was not found.
1960 * Repeat the operation with tdb_hash_lock held.
1962 lmutex_lock(&udp
->tdb_hash_lock
);
1965 for (rlpp
= &table
[hash
];
1966 (rlp
= *rlpp
) != NULL
;
1967 rlpp
= &rlp
->robust_next
) {
1968 if (rlp
->robust_lock
== mp
) { /* already registered */
1969 lmutex_unlock(&udp
->tdb_hash_lock
);
1972 /* remember the first invalid entry, if any */
1973 if (rlp
->robust_lock
== INVALID_ADDR
&& invalid
== NULL
)
1978 * The lock has never been registered.
1979 * Add it to the table and register it now.
1981 if ((rlp
= invalid
) != NULL
) {
1983 * Reuse the invalid entry we found above.
1984 * The linkages are still correct.
1986 rlp
->robust_lock
= mp
;
1990 * Allocate a new entry and add it to
1991 * the hash table and to the global list.
1993 rlp
= lmalloc(sizeof (*rlp
));
1994 rlp
->robust_lock
= mp
;
1995 rlp
->robust_next
= NULL
;
1996 rlp
->robust_list
= udp
->robustlist
;
1997 udp
->robustlist
= rlp
;
2002 lmutex_unlock(&udp
->tdb_hash_lock
);
2004 (void) ___lwp_mutex_register(mp
, &rlp
->robust_lock
);
2008 * This is called in the child of fork()/forkall() to start over
2009 * with a clean slate. (Each process must register its own locks.)
2010 * No locks are needed because all other threads are suspended or gone.
2013 unregister_locks(void)
2015 uberdata_t
*udp
= curthread
->ul_uberdata
;
2021 * Do this first, before calling lfree().
2023 table
= udp
->robustlocks
;
2024 udp
->robustlocks
= NULL
;
2025 rlp
= udp
->robustlist
;
2026 udp
->robustlist
= NULL
;
2029 * Do this by traversing the global list, not the hash table.
2031 while (rlp
!= NULL
) {
2032 next
= rlp
->robust_list
;
2033 lfree(rlp
, sizeof (*rlp
));
2037 lfree(table
, LOCKHASHSZ
* sizeof (robust_t
*));
2041 * Returns with mutex_owner set correctly.
2044 mutex_lock_internal(mutex_t
*mp
, timespec_t
*tsp
, int try)
2046 ulwp_t
*self
= curthread
;
2047 uberdata_t
*udp
= self
->ul_uberdata
;
2048 int mtype
= mp
->mutex_type
;
2049 tdb_mutex_stats_t
*msp
= MUTEX_STATS(mp
, udp
);
2051 int noceil
= try & MUTEX_NOCEIL
;
2055 try &= ~MUTEX_NOCEIL
;
2056 ASSERT(try == MUTEX_TRY
|| try == MUTEX_LOCK
);
2058 if (!self
->ul_schedctl_called
)
2059 (void) setup_schedctl();
2061 if (msp
&& try == MUTEX_TRY
)
2062 tdb_incr(msp
->mutex_try
);
2064 if ((mtype
& (LOCK_RECURSIVE
|LOCK_ERRORCHECK
)) && mutex_held(mp
))
2065 return (mutex_recursion(mp
, mtype
, try));
2067 if (self
->ul_error_detection
&& try == MUTEX_LOCK
&&
2068 tsp
== NULL
&& mutex_held(mp
))
2069 lock_error(mp
, "mutex_lock", NULL
, NULL
);
2071 if ((mtype
& LOCK_PRIO_PROTECT
) && noceil
== 0) {
2073 if (self
->ul_cid
!= self
->ul_rtclassid
) {
2074 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, EPERM
);
2077 ceil
= mp
->mutex_ceiling
;
2078 myprio
= self
->ul_epri
? self
->ul_epri
: self
->ul_pri
;
2079 if (myprio
> ceil
) {
2080 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, EINVAL
);
2083 if ((error
= _ceil_mylist_add(mp
)) != 0) {
2084 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, error
);
2088 _ceil_prio_inherit(ceil
);
2091 if ((mtype
& (USYNC_PROCESS
| LOCK_ROBUST
))
2092 == (USYNC_PROCESS
| LOCK_ROBUST
))
2095 if (mtype
& LOCK_PRIO_INHERIT
) {
2096 /* go straight to the kernel */
2097 if (try == MUTEX_TRY
)
2098 error
= mutex_trylock_kernel(mp
);
2099 else /* MUTEX_LOCK */
2100 error
= mutex_lock_kernel(mp
, tsp
, msp
);
2102 * The kernel never sets or clears the lock byte
2103 * for LOCK_PRIO_INHERIT mutexes.
2104 * Set it here for consistency.
2109 mp
->mutex_lockw
= LOCKSET
;
2114 mp
->mutex_lockw
= LOCKSET
;
2116 case ENOTRECOVERABLE
:
2117 ASSERT(mtype
& LOCK_ROBUST
);
2120 if (try == MUTEX_TRY
) {
2122 } else if (tsp
!= NULL
) { /* simulate a timeout */
2124 * Note: mutex_timedlock() never returns EINTR.
2126 timespec_t ts
= *tsp
;
2129 while (__nanosleep(&ts
, &rts
) == EINTR
)
2132 } else { /* simulate a deadlock */
2137 } else if (mtype
& USYNC_PROCESS
) {
2138 error
= mutex_trylock_process(mp
, try == MUTEX_LOCK
);
2139 if (error
== EBUSY
&& try == MUTEX_LOCK
)
2140 error
= mutex_lock_kernel(mp
, tsp
, msp
);
2141 } else { /* USYNC_THREAD */
2142 error
= mutex_trylock_adaptive(mp
, try == MUTEX_LOCK
);
2143 if (error
== EBUSY
&& try == MUTEX_LOCK
)
2144 error
= mutex_lock_queue(self
, msp
, mp
, tsp
);
2151 if (mtype
& LOCK_ROBUST
)
2154 record_begin_hold(msp
);
2157 if ((mtype
& LOCK_PRIO_PROTECT
) && noceil
== 0) {
2158 (void) _ceil_mylist_del(mp
);
2162 if (try == MUTEX_TRY
) {
2164 tdb_incr(msp
->mutex_try_fail
);
2165 if (__td_event_report(self
, TD_LOCK_TRY
, udp
)) {
2166 self
->ul_td_evbuf
.eventnum
= TD_LOCK_TRY
;
2167 tdb_event(TD_LOCK_TRY
, udp
);
2177 fast_process_lock(mutex_t
*mp
, timespec_t
*tsp
, int mtype
, int try)
2179 ulwp_t
*self
= curthread
;
2180 uberdata_t
*udp
= self
->ul_uberdata
;
2183 * We know that USYNC_PROCESS is set in mtype and that
2184 * zero, one, or both of the flags LOCK_RECURSIVE and
2185 * LOCK_ERRORCHECK are set, and that no other flags are set.
2187 ASSERT((mtype
& ~(USYNC_PROCESS
|LOCK_RECURSIVE
|LOCK_ERRORCHECK
)) == 0);
2188 enter_critical(self
);
2189 #if defined(__sparc) && !defined(_LP64)
2190 /* horrible hack, necessary only on 32-bit sparc */
2191 if (((uintptr_t)mp
& (_LONG_LONG_ALIGNMENT
- 1)) &&
2192 self
->ul_misaligned
) {
2193 if (set_lock_byte(&mp
->mutex_lockw
) == 0) {
2194 mp
->mutex_ownerpid
= udp
->pid
;
2195 mp
->mutex_owner
= (uintptr_t)self
;
2196 exit_critical(self
);
2197 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, 0);
2202 if (set_lock_byte64(&mp
->mutex_lockword64
, udp
->pid
) == 0) {
2203 mp
->mutex_owner
= (uintptr_t)self
;
2204 /* mp->mutex_ownerpid was set by set_lock_byte64() */
2205 exit_critical(self
);
2206 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, 0);
2209 exit_critical(self
);
2211 if ((mtype
& (LOCK_RECURSIVE
|LOCK_ERRORCHECK
)) && shared_mutex_held(mp
))
2212 return (mutex_recursion(mp
, mtype
, try));
2214 if (try == MUTEX_LOCK
) {
2215 if (mutex_trylock_process(mp
, 1) == 0)
2217 return (mutex_lock_kernel(mp
, tsp
, NULL
));
2220 if (__td_event_report(self
, TD_LOCK_TRY
, udp
)) {
2221 self
->ul_td_evbuf
.eventnum
= TD_LOCK_TRY
;
2222 tdb_event(TD_LOCK_TRY
, udp
);
2228 mutex_lock_impl(mutex_t
*mp
, timespec_t
*tsp
)
2230 ulwp_t
*self
= curthread
;
2231 int mtype
= mp
->mutex_type
;
2232 uberflags_t
*gflags
;
2234 if (((uintptr_t)mp
& (_LONG_LONG_ALIGNMENT
- 1)) &&
2235 self
->ul_error_detection
&& self
->ul_misaligned
== 0)
2236 lock_error(mp
, "mutex_lock", NULL
, "mutex is misaligned");
2239 * Optimize the case of USYNC_THREAD, including
2240 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
2241 * no error detection, no lock statistics,
2242 * and the process has only a single thread.
2243 * (Most likely a traditional single-threaded application.)
2245 if (((mtype
& ~(LOCK_RECURSIVE
|LOCK_ERRORCHECK
)) |
2246 self
->ul_uberdata
->uberflags
.uf_all
) == 0) {
2248 * Only one thread exists so we don't need an atomic operation.
2249 * We do, however, need to protect against signals.
2251 if (mp
->mutex_lockw
== 0) {
2253 mp
->mutex_lockw
= LOCKSET
;
2254 mp
->mutex_owner
= (uintptr_t)self
;
2256 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, 0);
2259 if (mtype
&& MUTEX_OWNER(mp
) == self
)
2260 return (mutex_recursion(mp
, mtype
, MUTEX_LOCK
));
2262 * We have reached a deadlock, probably because the
2263 * process is executing non-async-signal-safe code in
2264 * a signal handler and is attempting to acquire a lock
2265 * that it already owns. This is not surprising, given
 * bad programming practices over the years that have
2267 * resulted in applications calling printf() and such
2268 * in their signal handlers. Unless the user has told
2269 * us that the signal handlers are safe by setting:
2270 * export _THREAD_ASYNC_SAFE=1
2271 * we return EDEADLK rather than actually deadlocking.
2274 MUTEX_OWNER(mp
) == self
&& !self
->ul_async_safe
) {
2275 DTRACE_PROBE2(plockstat
, mutex__error
, mp
, EDEADLK
);
2281 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
2282 * no error detection, and no lock statistics.
2283 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
2285 if ((gflags
= self
->ul_schedctl_called
) != NULL
&&
2286 (gflags
->uf_trs_ted
|
2287 (mtype
& ~(USYNC_PROCESS
|LOCK_RECURSIVE
|LOCK_ERRORCHECK
))) == 0) {
2288 if (mtype
& USYNC_PROCESS
)
2289 return (fast_process_lock(mp
, tsp
, mtype
, MUTEX_LOCK
));
2291 if (set_lock_byte(&mp
->mutex_lockw
) == 0) {
2292 mp
->mutex_owner
= (uintptr_t)self
;
2294 DTRACE_PROBE3(plockstat
, mutex__acquire
, mp
, 0, 0);
2298 if (mtype
&& MUTEX_OWNER(mp
) == self
)
2299 return (mutex_recursion(mp
, mtype
, MUTEX_LOCK
));
2300 if (mutex_trylock_adaptive(mp
, 1) != 0)
2301 return (mutex_lock_queue(self
, NULL
, mp
, tsp
));
2305 /* else do it the long way */
2306 return (mutex_lock_internal(mp
, tsp
, MUTEX_LOCK
));
2309 #pragma weak pthread_mutex_lock = mutex_lock
2310 #pragma weak _mutex_lock = mutex_lock
2312 mutex_lock(mutex_t
*mp
)
2314 ASSERT(!curthread
->ul_critical
|| curthread
->ul_bindflags
);
2315 return (mutex_lock_impl(mp
, NULL
));
2319 mutex_enter(mutex_t
*mp
)
2322 int attr
= mp
->mutex_type
& ALL_ATTRIBUTES
;
2325 * Require LOCK_ERRORCHECK, accept LOCK_RECURSIVE.
2327 if (attr
!= LOCK_ERRORCHECK
&&
2328 attr
!= (LOCK_ERRORCHECK
| LOCK_RECURSIVE
)) {
2329 mutex_panic(mp
, "mutex_enter: bad mutex type");
2331 ret
= mutex_lock(mp
);
2332 if (ret
== EDEADLK
) {
2333 mutex_panic(mp
, "recursive mutex_enter");
2334 } else if (ret
== EAGAIN
) {
2335 mutex_panic(mp
, "excessive recursive mutex_enter");
2336 } else if (ret
!= 0) {
2337 mutex_panic(mp
, "unknown mutex_enter failure");
2342 pthread_mutex_timedlock(pthread_mutex_t
*_RESTRICT_KYWD mp
,
2343 const struct timespec
*_RESTRICT_KYWD abstime
)
2348 ASSERT(!curthread
->ul_critical
|| curthread
->ul_bindflags
);
2349 abstime_to_reltime(CLOCK_REALTIME
, abstime
, &tslocal
);
2350 error
= mutex_lock_impl((mutex_t
*)mp
, &tslocal
);
int
pthread_mutex_reltimedlock_np(pthread_mutex_t *_RESTRICT_KYWD mp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	tslocal = *reltime;
	error = mutex_lock_impl((mutex_t *)mp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}
#pragma weak pthread_mutex_trylock = mutex_trylock
int
mutex_trylock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	uberflags_t *gflags;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	/*
	 * Optimize the case of USYNC_THREAD, including
	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
	 * no error detection, no lock statistics,
	 * and the process has only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
	    udp->uberflags.uf_all) == 0) {
		/*
		 * Only one thread exists so we don't need an atomic operation.
		 * We do, however, need to protect against signals.
		 */
		if (mp->mutex_lockw == 0) {
			sigoff(self);
			mp->mutex_lockw = LOCKSET;
			mp->mutex_owner = (uintptr_t)self;
			sigon(self);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			return (0);
		}
		if (mtype && MUTEX_OWNER(mp) == self)
			return (mutex_recursion(mp, mtype, MUTEX_TRY));
		return (EBUSY);
	}

	/*
	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
	 * no error detection, and no lock statistics.
	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL &&
	    (gflags->uf_trs_ted |
	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
		if (mtype & USYNC_PROCESS)
			return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
		sigoff(self);
		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			sigon(self);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			return (0);
		}
		sigon(self);
		if (mtype && MUTEX_OWNER(mp) == self)
			return (mutex_recursion(mp, mtype, MUTEX_TRY));
		if (__td_event_report(self, TD_LOCK_TRY, udp)) {
			self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
			tdb_event(TD_LOCK_TRY, udp);
		}
		return (EBUSY);
	}

	/* else do it the long way */
	return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
}
int
mutex_unlock_internal(mutex_t *mp, int retain_robust_flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	tdb_mutex_stats_t *msp;
	lwpid_t lwpid = 0;
	int error = 0;
	int release_all;

	if ((mtype & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
	    !mutex_held(mp))
		return (EPERM);

	if (self->ul_error_detection && !mutex_held(mp))
		lock_error(mp, "mutex_unlock", NULL, NULL);

	if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
		mp->mutex_rcount--;
		DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
		return (0);
	}

	if ((msp = MUTEX_STATS(mp, udp)) != NULL)
		(void) record_hold_time(msp);

	if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) &&
	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
		ASSERT(mtype & LOCK_ROBUST);
		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
	}
	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);

	if (mtype & LOCK_PRIO_INHERIT) {
		no_preempt(self);
		mp->mutex_owner = 0;
		/* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		mp->mutex_lockw = LOCKCLEAR;
		self->ul_pilocks--;
		error = ___lwp_mutex_unlock(mp);
		preempt(self);
	} else if (mtype & USYNC_PROCESS) {
		mutex_unlock_process(mp, release_all);
	} else {	/* USYNC_THREAD */
		if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) {
			(void) __lwp_unpark(lwpid);
			preempt(self);
		}
	}

	if (mtype & LOCK_ROBUST)
		forget_lock(mp);

	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
		_ceil_prio_waive();

	return (error);
}
#pragma weak pthread_mutex_unlock = mutex_unlock
#pragma weak _mutex_unlock = mutex_unlock
int
mutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	int mtype = mp->mutex_type;
	uberflags_t *gflags;
	lwpid_t lwpid;
	short el;

	/*
	 * Optimize the case of USYNC_THREAD, including
	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
	 * no error detection, no lock statistics,
	 * and the process has only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
	    self->ul_uberdata->uberflags.uf_all) == 0) {
		if (mtype) {
			/*
			 * At this point we know that one or both of the
			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
			 */
			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
				return (EPERM);
			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
				mp->mutex_rcount--;
				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
				return (0);
			}
		}
		/*
		 * Only one thread exists so we don't need an atomic operation.
		 * Also, there can be no waiters.
		 */
		sigoff(self);
		mp->mutex_owner = 0;
		mp->mutex_lockword = 0;
		sigon(self);
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		return (0);
	}

	/*
	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
	 * no error detection, and no lock statistics.
	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL) {
		if (((el = gflags->uf_trs_ted) | mtype) == 0) {
fast_unlock:
			if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
				(void) __lwp_unpark(lwpid);
				preempt(self);
			}
			return (0);
		}
		if (el)		/* error detection or lock statistics */
			goto slow_unlock;
		if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
			/*
			 * At this point we know that one or both of the
			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
			 */
			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
				return (EPERM);
			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
				mp->mutex_rcount--;
				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
				return (0);
			}
			goto fast_unlock;
		}
		if ((mtype &
		    ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
			/*
			 * At this point we know that zero, one, or both of the
			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
			 * that the USYNC_PROCESS flag is set.
			 */
			if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
				return (EPERM);
			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
				mp->mutex_rcount--;
				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
				return (0);
			}
			mutex_unlock_process(mp, 0);
			return (0);
		}
	}

	/* else do it the long way */
slow_unlock:
	return (mutex_unlock_internal(mp, 0));
}
void
mutex_exit(mutex_t *mp)
{
	int ret;
	int attr = mp->mutex_type & ALL_ATTRIBUTES;

	if (attr != LOCK_ERRORCHECK &&
	    attr != (LOCK_ERRORCHECK | LOCK_RECURSIVE)) {
		mutex_panic(mp, "mutex_exit: bad mutex type");
	}
	ret = mutex_unlock(mp);
	if (ret == EPERM) {
		mutex_panic(mp, "mutex_exit: not owner");
	} else if (ret != 0) {
		mutex_panic(mp, "unknown mutex_exit failure");
	}
}
/*
 * Internally to the library, almost all mutex lock/unlock actions
 * go through these lmutex_ functions, to protect critical regions.
 * We replicate a bit of code from mutex_lock() and mutex_unlock()
 * to make these functions faster since we know that the mutex type
 * of all internal locks is USYNC_THREAD.  We also know that internal
 * locking can never fail, so we panic if it does.
 */
void
lmutex_lock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	enter_critical(self);
	/*
	 * Optimize the case of no lock statistics and only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (udp->uberflags.uf_all == 0) {
		/*
		 * Only one thread exists; the mutex must be free.
		 */
		ASSERT(mp->mutex_lockw == 0);
		mp->mutex_lockw = LOCKSET;
		mp->mutex_owner = (uintptr_t)self;
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);

		if (!self->ul_schedctl_called)
			(void) setup_schedctl();

		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
		} else if (mutex_trylock_adaptive(mp, 1) != 0) {
			(void) mutex_lock_queue(self, msp, mp, NULL);
		}

		if (msp)
			record_begin_hold(msp);
	}
}
void
lmutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	/*
	 * Optimize the case of no lock statistics and only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (udp->uberflags.uf_all == 0) {
		/*
		 * Only one thread exists so there can be no waiters.
		 */
		mp->mutex_owner = 0;
		mp->mutex_lockword = 0;
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
	} else {
		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
		lwpid_t lwpid;

		if (msp)
			(void) record_hold_time(msp);
		if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
			(void) __lwp_unpark(lwpid);
			preempt(self);
		}
	}
	exit_critical(self);
}
/*
 * For specialized code in libc, like the asynchronous i/o code,
 * the following sig_*() locking primitives are used in order
 * to make the code asynchronous signal safe.  Signals are
 * deferred while locks acquired by these functions are held.
 */
void
sig_mutex_lock(mutex_t *mp)
{
	ulwp_t *self = curthread;

	sigoff(self);
	(void) mutex_lock(mp);
}

void
sig_mutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;

	(void) mutex_unlock(mp);
	sigon(self);
}

int
sig_mutex_trylock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	int error;

	sigoff(self);
	if ((error = mutex_trylock(mp)) != 0)
		sigon(self);
	return (error);
}
/*
 * sig_cond_wait() is a cancellation point.
 */
int
sig_cond_wait(cond_t *cv, mutex_t *mp)
{
	int error;

	ASSERT(curthread->ul_sigdefer != 0);
	pthread_testcancel();
	error = __cond_wait(cv, mp);
	if (error == EINTR && curthread->ul_cursig) {
		sig_mutex_unlock(mp);
		/* take the deferred signal here */
		sig_mutex_lock(mp);
	}
	pthread_testcancel();
	return (error);
}
/*
 * sig_cond_reltimedwait() is a cancellation point.
 */
int
sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
{
	int error;

	ASSERT(curthread->ul_sigdefer != 0);
	pthread_testcancel();
	error = __cond_reltimedwait(cv, mp, ts);
	if (error == EINTR && curthread->ul_cursig) {
		sig_mutex_unlock(mp);
		/* take the deferred signal here */
		sig_mutex_lock(mp);
	}
	pthread_testcancel();
	return (error);
}
/*
 * For specialized code in libc, like the stdio code,
 * the following cancel_safe_*() locking primitives are used in
 * order to make the code cancellation-safe.  Cancellation is
 * deferred while locks acquired by these functions are held.
 */
void
cancel_safe_mutex_lock(mutex_t *mp)
{
	(void) mutex_lock(mp);
	curthread->ul_libc_locks++;
}

int
cancel_safe_mutex_trylock(mutex_t *mp)
{
	int error;

	if ((error = mutex_trylock(mp)) == 0)
		curthread->ul_libc_locks++;
	return (error);
}

void
cancel_safe_mutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;

	ASSERT(self->ul_libc_locks != 0);

	(void) mutex_unlock(mp);

	/*
	 * Decrement the count of locks held by cancel_safe_mutex_lock().
	 * If we are then in a position to terminate cleanly and
	 * if there is a pending cancellation and cancellation
	 * is not disabled and we received EINTR from a recent
	 * system call then perform the cancellation action now.
	 */
	if (--self->ul_libc_locks == 0 &&
	    !(self->ul_vfork | self->ul_nocancel |
	    self->ul_critical | self->ul_sigdefer) &&
	    cancel_active())
		pthread_exit(PTHREAD_CANCELED);
}
static int
shared_mutex_held(mutex_t *mparg)
{
	/*
	 * The 'volatile' is necessary to make sure the compiler doesn't
	 * reorder the tests of the various components of the mutex.
	 * They must be tested in this order:
	 *	mutex_lockw
	 *	mutex_owner
	 *	mutex_ownerpid
	 * This relies on the fact that everywhere mutex_lockw is cleared,
	 * mutex_owner and mutex_ownerpid are cleared before mutex_lockw
	 * is cleared, and that everywhere mutex_lockw is set, mutex_owner
	 * and mutex_ownerpid are set after mutex_lockw is set, and that
	 * mutex_lockw is set or cleared with a memory barrier.
	 */
	volatile mutex_t *mp = (volatile mutex_t *)mparg;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid);
}
#pragma weak _mutex_held = mutex_held
int
mutex_held(mutex_t *mparg)
{
	volatile mutex_t *mp = (volatile mutex_t *)mparg;

	if (mparg->mutex_type & USYNC_PROCESS)
		return (shared_mutex_held(mparg));
	return (MUTEX_OWNED(mp, curthread));
}
#pragma weak pthread_mutex_destroy = mutex_destroy
#pragma weak _mutex_destroy = mutex_destroy
int
mutex_destroy(mutex_t *mp)
{
	if (mp->mutex_type & USYNC_PROCESS)
		forget_lock(mp);
	(void) memset(mp, 0, sizeof (*mp));
	tdb_sync_obj_deregister(mp);
	return (0);
}
#pragma weak pthread_mutex_consistent_np = mutex_consistent
#pragma weak pthread_mutex_consistent = mutex_consistent
int
mutex_consistent(mutex_t *mp)
{
	/*
	 * Do this only for an inconsistent, initialized robust lock
	 * that we hold.  For all other cases, return EINVAL.
	 */
	if (mutex_held(mp) &&
	    (mp->mutex_type & LOCK_ROBUST) &&
	    (mp->mutex_flag & LOCK_INITED) &&
	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
		mp->mutex_rcount = 0;
		return (0);
	}
	return (EINVAL);
}
/*
 * Spin locks are separate from ordinary mutexes,
 * but we use the same data structure for them.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
	mutex_t *mp = (mutex_t *)lock;

	(void) memset(mp, 0, sizeof (*mp));
	if (pshared == PTHREAD_PROCESS_SHARED)
		mp->mutex_type = USYNC_PROCESS;
	else
		mp->mutex_type = USYNC_THREAD;
	mp->mutex_flag = LOCK_INITED;
	mp->mutex_magic = MUTEX_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their mutexes
	 * (and don't check the return code from pthread_spin_init),
	 * we put it here, after initializing the mutex regardless.
	 */
	if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}
int
pthread_spin_destroy(pthread_spinlock_t *lock)
{
	(void) memset(lock, 0, sizeof (*lock));
	return (0);
}
int
pthread_spin_trylock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;
	int error = 0;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) != 0)
		error = EBUSY;
	else {
		mp->mutex_owner = (uintptr_t)self;
		if (mp->mutex_type == USYNC_PROCESS)
			mp->mutex_ownerpid = self->ul_uberdata->pid;
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	}
	preempt(self);
	return (error);
}
int
pthread_spin_lock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	int count = 0;

	ASSERT(!self->ul_critical || self->ul_bindflags);

	DTRACE_PROBE1(plockstat, mutex__spin, mp);

	/*
	 * We don't care whether the owner is running on a processor.
	 * We just spin because that's what this interface requires.
	 */
	for (;;) {
		if (*lockp == 0) {	/* lock byte appears to be clear */
			no_preempt(self);
			if (set_lock_byte(lockp) == 0)
				break;
			preempt(self);
		}
		if (count < INT_MAX)
			count++;
		SMT_PAUSE();
	}
	mp->mutex_owner = (uintptr_t)self;
	if (mp->mutex_type == USYNC_PROCESS)
		mp->mutex_ownerpid = self->ul_uberdata->pid;
	preempt(self);
	if (count) {
		DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
	}
	DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
	return (0);
}
int
pthread_spin_unlock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;

	no_preempt(self);
	mp->mutex_owner = 0;
	mp->mutex_ownerpid = 0;
	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
	(void) atomic_swap_32(&mp->mutex_lockword, 0);
	preempt(self);
	return (0);
}
#define	INITIAL_LOCKS	8	/* initial size of ul_heldlocks.array */

/*
 * Find/allocate an entry for 'lock' in our array of held locks.
 */
static mutex_t **
find_lock_entry(mutex_t *lock)
{
	ulwp_t *self = curthread;
	mutex_t **remembered = NULL;
	mutex_t **lockptr;
	uint_t nlocks;

	if ((nlocks = self->ul_heldlockcnt) != 0)
		lockptr = self->ul_heldlocks.array;
	else {
		nlocks = 1;
		lockptr = &self->ul_heldlocks.single;
	}

	for (; nlocks; nlocks--, lockptr++) {
		if (*lockptr == lock)
			return (lockptr);
		if (*lockptr == NULL && remembered == NULL)
			remembered = lockptr;
	}
	if (remembered != NULL) {
		*remembered = lock;
		return (remembered);
	}

	/*
	 * No entry available.  Allocate more space, converting
	 * the single entry into an array of entries if necessary.
	 */
	if ((nlocks = self->ul_heldlockcnt) == 0) {
		/*
		 * Initial allocation of the array.
		 * Convert the single entry into an array.
		 */
		self->ul_heldlockcnt = nlocks = INITIAL_LOCKS;
		lockptr = lmalloc(nlocks * sizeof (mutex_t *));
		/*
		 * The single entry becomes the first entry in the array.
		 */
		*lockptr = self->ul_heldlocks.single;
		self->ul_heldlocks.array = lockptr;
		/*
		 * Return the next available entry in the array.
		 */
		*(lockptr + 1) = lock;
		return (lockptr + 1);
	}
	/*
	 * Reallocate the array, double the size each time.
	 */
	lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *));
	(void) memcpy(lockptr, self->ul_heldlocks.array,
	    nlocks * sizeof (mutex_t *));
	lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
	self->ul_heldlocks.array = lockptr;
	self->ul_heldlockcnt *= 2;
	/*
	 * Return the next available entry in the newly allocated array.
	 */
	*(lockptr += nlocks) = lock;
	return (lockptr);
}
/*
 * Insert 'lock' into our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
remember_lock(mutex_t *lock)
{
	(void) find_lock_entry(lock);
}

/*
 * Remove 'lock' from our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
forget_lock(mutex_t *lock)
{
	*find_lock_entry(lock) = NULL;
}
/*
 * Free the array of held locks.
 */
void
heldlock_free(ulwp_t *ulwp)
{
	uint_t nlocks;

	if ((nlocks = ulwp->ul_heldlockcnt) != 0)
		lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
	ulwp->ul_heldlockcnt = 0;
	ulwp->ul_heldlocks.array = NULL;
}
/*
 * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
 * Called from _thrp_exit() to deal with abandoned locks.
 */
void
heldlock_exit(void)
{
	ulwp_t *self = curthread;
	mutex_t **lockptr;
	uint_t nlocks;
	mutex_t *mp;

	if ((nlocks = self->ul_heldlockcnt) != 0)
		lockptr = self->ul_heldlocks.array;
	else {
		nlocks = 1;
		lockptr = &self->ul_heldlocks.single;
	}

	for (; nlocks; nlocks--, lockptr++) {
		/*
		 * The kernel takes care of transitioning held
		 * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
		 * We avoid that case here.
		 */
		if ((mp = *lockptr) != NULL &&
		    mutex_held(mp) &&
		    (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
		    LOCK_ROBUST) {
			mp->mutex_rcount = 0;
			if (!(mp->mutex_flag & LOCK_UNMAPPED))
				mp->mutex_flag |= LOCK_OWNERDEAD;
			(void) mutex_unlock_internal(mp, 1);
		}
	}

	heldlock_free(self);
}
#pragma weak _cond_init = cond_init
/* ARGSUSED2 */
int
cond_init(cond_t *cvp, int type, void *arg)
{
	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	(void) memset(cvp, 0, sizeof (*cvp));
	cvp->cond_type = (uint16_t)type;
	cvp->cond_magic = COND_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their condvars
	 * (and don't check the return code from cond_init),
	 * we put it here, after initializing the condvar regardless.
	 */
	if (((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}
/*
 * cond_sleep_queue(): utility function for cond_wait_queue().
 *
 * Go to sleep on a condvar sleep queue, expect to be waked up
 * by someone calling cond_signal() or cond_broadcast() or due
 * to receiving a UNIX signal or being cancelled, or just simply
 * due to a spurious wakeup (like someone calling forkall()).
 *
 * The associated mutex is *not* reacquired before returning.
 * That must be done by the caller of cond_sleep_queue().
 */
static int
cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;
	queue_head_t *mqp;
	lwpid_t lwpid;
	int signalled;
	int error;
	int cv_wake;
	int release_all;

	/*
	 * Put ourself on the CV sleep queue, unlock the mutex, then
	 * park ourself and unpark a candidate lwp to grab the mutex.
	 * We must go onto the CV sleep queue before dropping the
	 * mutex in order to guarantee atomicity of the operation.
	 */
	self->ul_sp = stkptr();
	qp = queue_lock(cvp, CV);
	enqueue(qp, self, 0);
	cvp->cond_waiters_user = 1;
	self->ul_cvmutex = mp;
	self->ul_cv_wake = cv_wake = (tsp != NULL);
	self->ul_signalled = 0;
	if (mp->mutex_flag & LOCK_OWNERDEAD) {
		mp->mutex_flag &= ~LOCK_OWNERDEAD;
		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
	}
	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
	lwpid = mutex_unlock_queue(mp, release_all);
	for (;;) {
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if (lwpid != 0) {
			lwpid = preempt_unpark(self, lwpid);
			preempt(self);
		}
		/*
		 * We may have a deferred signal present,
		 * in which case we should return EINTR.
		 * Also, we may have received a SIGCANCEL; if so
		 * and we are cancelable we should return EINTR.
		 * We force an immediate EINTR return from
		 * __lwp_park() by turning our parking flag off.
		 */
		if (self->ul_cursig != 0 ||
		    (self->ul_cancelable && self->ul_cancel_pending))
			set_parking_flag(self, 0);
		/*
		 * __lwp_park() will return the residual time in tsp
		 * if we are unparked before the timeout expires.
		 */
		error = __lwp_park(tsp, lwpid);
		set_parking_flag(self, 0);
		lwpid = 0;	/* unpark the other lwp only once */
		/*
		 * We were waked up by cond_signal(), cond_broadcast(),
		 * by an interrupt or timeout (EINTR or ETIME),
		 * or we may just have gotten a spurious wakeup.
		 */
		qp = queue_lock(cvp, CV);
		if (!cv_wake)
			mqp = queue_lock(mp, MX);
		if (self->ul_sleepq == NULL)
			break;
		/*
		 * We are on either the condvar sleep queue or the
		 * mutex sleep queue.  Break out of the sleep if we
		 * were interrupted or we timed out (EINTR or ETIME).
		 * Else this is a spurious wakeup; continue the loop.
		 */
		if (!cv_wake && self->ul_sleepq == mqp) {	/* mutex queue */
			if (error) {
				mp->mutex_waiters = dequeue_self(mqp);
				break;
			}
			tsp = NULL;	/* no more timeout */
		} else if (self->ul_sleepq == qp) {	/* condvar queue */
			if (error) {
				cvp->cond_waiters_user = dequeue_self(qp);
				break;
			}
			/*
			 * Else a spurious wakeup on the condvar queue.
			 * __lwp_park() has already adjusted the timeout.
			 */
		} else {
			thr_panic("cond_sleep_queue(): thread not on queue");
		}
		if (!cv_wake)
			queue_unlock(mqp);
	}

	self->ul_sp = 0;
	self->ul_cv_wake = 0;
	ASSERT(self->ul_cvmutex == NULL);
	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
	    self->ul_wchan == NULL);

	signalled = self->ul_signalled;
	self->ul_signalled = 0;
	queue_unlock(qp);
	if (!cv_wake)
		queue_unlock(mqp);

	/*
	 * If we were concurrently cond_signal()d and any of:
	 * received a UNIX signal, were cancelled, or got a timeout,
	 * then perform another cond_signal() to avoid consuming it.
	 */
	if (error && signalled)
		(void) cond_signal(cvp);

	return (error);
}
static void
cond_wait_check_alignment(cond_t *cvp, mutex_t *mp)
{
	if ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1))
		lock_error(mp, "cond_wait", cvp, "mutex is misaligned");
	if ((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1))
		lock_error(mp, "cond_wait", cvp, "condvar is misaligned");
}
int
cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	int error;
	int merror;

	if (self->ul_error_detection && self->ul_misaligned == 0)
		cond_wait_check_alignment(cvp, mp);

	/*
	 * The old thread library was programmed to defer signals
	 * while in cond_wait() so that the associated mutex would
	 * be guaranteed to be held when the application signal
	 * handler was invoked.
	 *
	 * We do not behave this way by default; the state of the
	 * associated mutex in the signal handler is undefined.
	 *
	 * To accommodate applications that depend on the old
	 * behavior, the _THREAD_COND_WAIT_DEFER environment
	 * variable can be set to 1 and we will behave in the
	 * old way with respect to cond_wait().
	 */
	if (self->ul_cond_wait_defer)
		sigoff(self);

	error = cond_sleep_queue(cvp, mp, tsp);

	/*
	 * Reacquire the mutex.
	 */
	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
		error = merror;

	/*
	 * Take any deferred signal now, after we have reacquired the mutex.
	 */
	if (self->ul_cond_wait_defer)
		sigon(self);

	return (error);
}
/*
 * cond_sleep_kernel(): utility function for cond_wait_kernel().
 * See the comment ahead of cond_sleep_queue(), above.
 */
static int
cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	int mtype = mp->mutex_type;
	ulwp_t *self = curthread;
	int error;

	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
		_ceil_prio_waive();

	self->ul_sp = stkptr();
	self->ul_wchan = cvp;
	sigoff(self);
	mp->mutex_owner = 0;
	/* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
	if (mtype & LOCK_PRIO_INHERIT) {
		mp->mutex_lockw = LOCKCLEAR;
		self->ul_pilocks--;
	}
	/*
	 * ___lwp_cond_wait() returns immediately with EINTR if
	 * set_parking_flag(self,0) is called on this lwp before it
	 * goes to sleep in the kernel.  sigacthandler() calls this
	 * when a deferred signal is noted.  This assures that we don't
	 * get stuck in ___lwp_cond_wait() with all signals blocked
	 * due to taking a deferred signal before going to sleep.
	 */
	set_parking_flag(self, 1);
	if (self->ul_cursig != 0 ||
	    (self->ul_cancelable && self->ul_cancel_pending))
		set_parking_flag(self, 0);
	error = ___lwp_cond_wait(cvp, mp, tsp, 1);
	set_parking_flag(self, 0);
	sigon(self);
	self->ul_sp = 0;
	self->ul_wchan = NULL;
	return (error);
}
int
cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	int error;
	int merror;

	if (self->ul_error_detection && self->ul_misaligned == 0)
		cond_wait_check_alignment(cvp, mp);

	/*
	 * See the large comment in cond_wait_queue(), above.
	 */
	if (self->ul_cond_wait_defer)
		sigoff(self);

	error = cond_sleep_kernel(cvp, mp, tsp);

	/*
	 * Override the return code from ___lwp_cond_wait()
	 * with any non-zero return code from mutex_lock().
	 * This addresses robust lock failures in particular;
	 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
	 * errors in order to take corrective action.
	 */
	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
		error = merror;

	/*
	 * Take any deferred signal now, after we have reacquired the mutex.
	 */
	if (self->ul_cond_wait_defer)
		sigon(self);

	return (error);
}
/*
 * Common code for cond_wait() and cond_timedwait()
 */
int
cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep = 0;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
	uint8_t rcount;
	int error = 0;

	/*
	 * The SUSV3 Posix spec for pthread_cond_timedwait() states:
	 *	Except in the case of [ETIMEDOUT], all these error checks
	 *	shall act as if they were performed immediately at the
	 *	beginning of processing for the function and shall cause
	 *	an error return, in effect, prior to modifying the state
	 *	of the mutex specified by mutex or the condition variable
	 *	specified by cond.
	 * Therefore, we must return EINVAL now if the timeout is invalid.
	 */
	if (tsp != NULL &&
	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
		return (EINVAL);

	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_sp = stkptr();
		self->ul_wchan = cvp;
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = cvp;
		tdb_event(TD_SLEEP, udp);
		self->ul_sp = 0;
	}
	if (csp) {
		if (tsp)
			tdb_incr(csp->cond_timedwait);
		else
			tdb_incr(csp->cond_wait);
	}
	if (msp)
		begin_sleep = record_hold_time(msp);
	else if (csp)
		begin_sleep = gethrtime();

	if (self->ul_error_detection) {
		if (!mutex_held(mp))
			lock_error(mp, "cond_wait", cvp, NULL);
		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
			lock_error(mp, "recursive mutex in cond_wait",
			    cvp, NULL);
		if (cvp->cond_type & USYNC_PROCESS) {
			if (!(mtype & USYNC_PROCESS))
				lock_error(mp, "cond_wait", cvp,
				    "condvar process-shared, "
				    "mutex process-private");
		} else {
			if (mtype & USYNC_PROCESS)
				lock_error(mp, "cond_wait", cvp,
				    "condvar process-private, "
				    "mutex process-shared");
		}
	}

	/*
	 * We deal with recursive mutexes by completely
	 * dropping the lock and restoring the recursion
	 * count after waking up.  This is arguably wrong,
	 * but it obeys the principle of least astonishment.
	 */
	rcount = mp->mutex_rcount;
	mp->mutex_rcount = 0;
	if ((mtype &
	    (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) |
	    (cvp->cond_type & USYNC_PROCESS))
		error = cond_wait_kernel(cvp, mp, tsp);
	else
		error = cond_wait_queue(cvp, mp, tsp);
	mp->mutex_rcount = rcount;

	if (csp) {
		hrtime_t lapse = gethrtime() - begin_sleep;

		if (tsp == NULL)
			csp->cond_wait_sleep_time += lapse;
		else {
			csp->cond_timedwait_sleep_time += lapse;
			if (error == ETIME)
				tdb_incr(csp->cond_timedwait_timeout);
		}
	}
	return (error);
}
/*
 * cond_wait() is a cancellation point but __cond_wait() is not.
 * Internally, libc calls the non-cancellation version.
 * Other libraries need to use pthread_setcancelstate(), as appropriate,
 * since __cond_wait() is not exported from libc.
 */
int
__cond_wait(cond_t *cvp, mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	uberflags_t *gflags;

	if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
	    !mutex_held(mp))
		return (EPERM);

	/*
	 * Optimize the common case of USYNC_THREAD plus
	 * no error detection, no lock statistics, and no event tracing.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL &&
	    (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
	    self->ul_td_events_enable |
	    udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
		return (cond_wait_queue(cvp, mp, NULL));

	/*
	 * Else do it the long way.
	 */
	return (cond_wait_common(cvp, mp, NULL));
}
#pragma weak _cond_wait = cond_wait
int
cond_wait(cond_t *cvp, mutex_t *mp)
{
	int error;

	_cancelon();
	error = __cond_wait(cvp, mp);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}
/*
 * pthread_cond_wait() is a cancellation point.
 */
int
pthread_cond_wait(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp)
{
	int error;

	error = cond_wait((cond_t *)cvp, (mutex_t *)mp);
	return ((error == EINTR)? 0 : error);
}
/*
 * cond_timedwait() is a cancellation point but __cond_timedwait() is not.
 */
int
__cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
	clockid_t clock_id = cvp->cond_clockid;
	timespec_t reltime;
	int error;

	if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
	    !mutex_held(mp))
		return (EPERM);

	if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
		clock_id = CLOCK_REALTIME;
	abstime_to_reltime(clock_id, abstime, &reltime);
	error = cond_wait_common(cvp, mp, &reltime);
	if (error == ETIME && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return ETIME if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.  Just return zero in this case,
		 * giving a spurious wakeup but not a timeout.
		 */
		if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
		    abstime->tv_nsec > gethrtime())
			error = 0;
	}
	return (error);
}
int
cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
	int error;

	_cancelon();
	error = __cond_timedwait(cvp, mp, abstime);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}
/*
 * pthread_cond_timedwait() is a cancellation point.
 */
int
pthread_cond_timedwait(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp,
    const struct timespec *_RESTRICT_KYWD abstime)
{
	int error;

	error = cond_timedwait((cond_t *)cvp, (mutex_t *)mp, abstime);
	if (error == ETIME)
		error = ETIMEDOUT;
	else if (error == EINTR)
		error = 0;
	return (error);
}
/*
 * cond_reltimedwait() is a cancellation point but __cond_reltimedwait() is not.
 */
int
__cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
{
	timespec_t tslocal = *reltime;

	if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
	    !mutex_held(mp))
		return (EPERM);

	return (cond_wait_common(cvp, mp, &tslocal));
}
int
cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
{
	int error;

	_cancelon();
	error = __cond_reltimedwait(cvp, mp, reltime);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}
int
pthread_cond_reltimedwait_np(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	int error;

	error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime);
	if (error == ETIME)
		error = ETIMEDOUT;
	else if (error == EINTR)
		error = 0;
	return (error);
}
#pragma weak pthread_cond_signal = cond_signal
#pragma weak _cond_signal = cond_signal
int
cond_signal(cond_t *cvp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
	int error = 0;
	int more;
	lwpid_t lwpid;
	queue_head_t *qp;
	mutex_t *mp;
	queue_head_t *mqp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;

	if (csp)
		tdb_incr(csp->cond_signal);

	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
		error = _lwp_cond_signal(cvp);

	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
		return (error);

	/*
	 * Move someone from the condvar sleep queue to the mutex sleep
	 * queue for the mutex that he will acquire on being waked up.
	 * We can do this only if we own the mutex he will acquire.
	 * If we do not own the mutex, or if his ul_cv_wake flag
	 * is set, just dequeue and unpark him.
	 */
	qp = queue_lock(cvp, CV);
	ulwpp = queue_slot(qp, &prev, &more);
	cvp->cond_waiters_user = more;
	if (ulwpp == NULL) {	/* no one on the sleep queue */
		queue_unlock(qp);
		return (error);
	}
	ulwp = *ulwpp;

	/*
	 * Inform the thread that he was the recipient of a cond_signal().
	 * This lets him deal with cond_signal() and, concurrently,
	 * one or more of a cancellation, a UNIX signal, or a timeout.
	 * These latter conditions must not consume a cond_signal().
	 */
	ulwp->ul_signalled = 1;

	/*
	 * Dequeue the waiter but leave his ul_sleepq non-NULL
	 * while we move him to the mutex queue so that he can
	 * deal properly with spurious wakeups.
	 */
	queue_unlink(qp, ulwpp, prev);

	mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
	ulwp->ul_cvmutex = NULL;
	ASSERT(mp != NULL);

	if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
		/* just wake him up */
		lwpid = ulwp->ul_lwpid;
		no_preempt(self);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		queue_unlock(qp);
		(void) __lwp_unpark(lwpid);
		preempt(self);
	} else {
		/* move him to the mutex queue */
		mqp = queue_lock(mp, MX);
		enqueue(mqp, ulwp, 0);
		mp->mutex_waiters = 1;
		queue_unlock(mqp);
		queue_unlock(qp);
	}

	return (error);
}
/*
 * Utility function called by mutex_wakeup_all(), cond_broadcast(),
 * and rw_queue_release() to (re)allocate a big buffer to hold the
 * lwpids of all the threads to be set running after they are removed
 * from their sleep queues.  Since we are holding a queue lock, we
 * cannot call any function that might acquire a lock.  mmap(), munmap(),
 * lwp_unpark_all() are simple system calls and are safe in this regard.
 */
lwpid_t *
alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
{
	/*
	 * Allocate NEWLWPS ids on the first overflow.
	 * Double the allocation each time after that.
	 */
	int nlwpid = *nlwpid_ptr;
	int maxlwps = *maxlwps_ptr;
	int first_allocation;
	int newlwps;
	void *vaddr;

	ASSERT(nlwpid == maxlwps);

	first_allocation = (maxlwps == MAXLWPS);
	newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
	vaddr = mmap(NULL, newlwps * sizeof (lwpid_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);

	if (vaddr == MAP_FAILED) {
		/*
		 * Let's hope this never happens.
		 * If it does, then we have a terrible
		 * thundering herd on our hands.
		 */
		(void) __lwp_unpark_all(lwpid, nlwpid);
		*nlwpid_ptr = 0;
	} else {
		(void) memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
		if (!first_allocation)
			(void) munmap((caddr_t)lwpid,
			    maxlwps * sizeof (lwpid_t));
		lwpid = vaddr;
		*maxlwps_ptr = newlwps;
	}

	return (lwpid);
}
#pragma weak pthread_cond_broadcast = cond_broadcast
#pragma weak _cond_broadcast = cond_broadcast
int
cond_broadcast(cond_t *cvp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
	int error = 0;
	queue_head_t *qp;
	queue_root_t *qrp;
	mutex_t *mp;
	mutex_t *mp_cache = NULL;
	queue_head_t *mqp = NULL;
	ulwp_t *ulwp;
	int nlwpid = 0;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	if (csp)
		tdb_incr(csp->cond_broadcast);

	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
		error = _lwp_cond_broadcast(cvp);

	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
		return (error);

	/*
	 * Move everyone from the condvar sleep queue to the mutex sleep
	 * queue for the mutex that they will acquire on being waked up.
	 * We can do this only if we own the mutex they will acquire.
	 * If we do not own the mutex, or if their ul_cv_wake flag
	 * is set, just dequeue and unpark them.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	qp = queue_lock(cvp, CV);
	cvp->cond_waiters_user = 0;
	for (;;) {
		if ((qrp = qp->qh_root) == NULL ||
		    (ulwp = qrp->qr_head) == NULL)
			break;
		ASSERT(ulwp->ul_wchan == cvp);
		queue_unlink(qp, &qrp->qr_head, NULL);
		mp = ulwp->ul_cvmutex;		/* his mutex */
		ulwp->ul_cvmutex = NULL;
		ASSERT(mp != NULL);
		if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
			/* just wake him up */
			ulwp->ul_sleepq = NULL;
			ulwp->ul_wchan = NULL;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
			lwpid[nlwpid++] = ulwp->ul_lwpid;
		} else {
			/* move him to the mutex queue */
			if (mp != mp_cache) {
				mp_cache = mp;
				if (mqp != NULL)
					queue_unlock(mqp);
				mqp = queue_lock(mp, MX);
			}
			enqueue(mqp, ulwp, 0);
			mp->mutex_waiters = 1;
		}
	}
	if (mqp != NULL)
		queue_unlock(mqp);
	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
	return (error);
}
#pragma weak pthread_cond_destroy = cond_destroy
int
cond_destroy(cond_t *cvp)
{
	cvp->cond_magic = 0;
	tdb_sync_obj_deregister(cvp);
	return (0);
}
#if defined(THREAD_DEBUG)
void
assert_no_libc_locks_held(void)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
}
#endif

/* protected by link_lock */
uint64_t spin_lock_spin;
uint64_t spin_lock_spin2;
uint64_t spin_lock_sleep;
uint64_t spin_lock_wakeup;
/*
 * Record spin lock statistics.
 * Called by a thread exiting itself in thrp_exit().
 * Also called via atexit() from the thread calling
 * exit() to do all the other threads as well.
 */
void
record_spin_locks(ulwp_t *ulwp)
{
	spin_lock_spin += ulwp->ul_spin_lock_spin;
	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
	ulwp->ul_spin_lock_spin = 0;
	ulwp->ul_spin_lock_spin2 = 0;
	ulwp->ul_spin_lock_sleep = 0;
	ulwp->ul_spin_lock_wakeup = 0;
}
/*
 * atexit function:  dump the queue statistics to stderr.
 */
static void
dump_queue_statistics(void)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	int qn;
	uint64_t spin_lock_total = 0;

	if (udp->queue_head == NULL || thread_queue_dump == 0)
		return;

	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
		return;
	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
		if (qp->qh_lockcount == 0)
			continue;
		spin_lock_total += qp->qh_lockcount;
		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
		    (u_longlong_t)qp->qh_lockcount,
		    qp->qh_qmax, qp->qh_hmax) < 0)
			return;
	}

	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
		return;
	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
		if (qp->qh_lockcount == 0)
			continue;
		spin_lock_total += qp->qh_lockcount;
		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
		    (u_longlong_t)qp->qh_lockcount,
		    qp->qh_qmax, qp->qh_hmax) < 0)
			return;
	}

	(void) fprintf(stderr, "\n  spin_lock_total  = %10llu\n",
	    (u_longlong_t)spin_lock_total);
	(void) fprintf(stderr, "  spin_lock_spin   = %10llu\n",
	    (u_longlong_t)spin_lock_spin);
	(void) fprintf(stderr, "  spin_lock_spin2  = %10llu\n",
	    (u_longlong_t)spin_lock_spin2);
	(void) fprintf(stderr, "  spin_lock_sleep  = %10llu\n",
	    (u_longlong_t)spin_lock_sleep);
	(void) fprintf(stderr, "  spin_lock_wakeup = %10llu\n",
	    (u_longlong_t)spin_lock_wakeup);
}