2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by John Birrell.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.15 2008/05/09 16:03:27 dillon Exp $
36 #include "namespace.h"
37 #include <machine/tls.h>
42 #include <sys/queue.h>
44 #include "un-namespace.h"
46 #include "thr_private.h"
/*
 * Sanity-checking macros, compiled in only when _PTHREADS_INVARIANTS
 * is defined; otherwise they expand to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) do {			\
	(m)->m_qe.tqe_prev = NULL;		\
	(m)->m_qe.tqe_next = NULL;		\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m) do {		\
	if ((m)->m_qe.tqe_prev == NULL)		\
		PANIC("mutex is not on list");	\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m) do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||	\
	    ((m)->m_qe.tqe_next != NULL))	\
		PANIC("mutex is on list");	\
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr) do {	\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be."); \
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
/* Release the storage backing a mutex object. */
#define MUTEX_DESTROY(m) do {			\
	free(m);				\
} while (0)
78 umtx_t _mutex_static_lock
;
83 static int mutex_self_trylock(pthread_mutex_t
);
84 static int mutex_self_lock(pthread_mutex_t
,
85 const struct timespec
*abstime
);
86 static int mutex_unlock_common(pthread_mutex_t
*);
88 int __pthread_mutex_init(pthread_mutex_t
*mutex
,
89 const pthread_mutexattr_t
*mutex_attr
);
90 int __pthread_mutex_trylock(pthread_mutex_t
*mutex
);
91 int __pthread_mutex_lock(pthread_mutex_t
*mutex
);
92 int __pthread_mutex_timedlock(pthread_mutex_t
*mutex
,
93 const struct timespec
*abs_timeout
);
96 mutex_init(pthread_mutex_t
*mutex
,
97 const pthread_mutexattr_t
*mutex_attr
, int private)
99 const struct pthread_mutex_attr
*attr
;
100 struct pthread_mutex
*pmutex
;
102 if (mutex_attr
== NULL
) {
103 attr
= &_pthread_mutexattr_default
;
106 if (attr
->m_type
< PTHREAD_MUTEX_ERRORCHECK
||
107 attr
->m_type
>= MUTEX_TYPE_MAX
)
109 if (attr
->m_protocol
< PTHREAD_PRIO_NONE
||
110 attr
->m_protocol
> PTHREAD_PRIO_PROTECT
)
114 if ((pmutex
= (pthread_mutex_t
)
115 malloc(sizeof(struct pthread_mutex
))) == NULL
)
118 _thr_umtx_init(&pmutex
->m_lock
);
119 pmutex
->m_type
= attr
->m_type
;
120 pmutex
->m_protocol
= attr
->m_protocol
;
121 TAILQ_INIT(&pmutex
->m_queue
);
122 pmutex
->m_owner
= NULL
;
123 pmutex
->m_flags
= attr
->m_flags
| MUTEX_FLAGS_INITED
;
125 pmutex
->m_flags
|= MUTEX_FLAGS_PRIVATE
;
127 pmutex
->m_refcount
= 0;
128 if (attr
->m_protocol
== PTHREAD_PRIO_PROTECT
)
129 pmutex
->m_prio
= attr
->m_ceiling
;
132 pmutex
->m_saved_prio
= 0;
133 MUTEX_INIT_LINK(pmutex
);
139 init_static(struct pthread
*thread
, pthread_mutex_t
*mutex
)
143 THR_LOCK_ACQUIRE(thread
, &_mutex_static_lock
);
146 ret
= mutex_init(mutex
, NULL
, 0);
150 THR_LOCK_RELEASE(thread
, &_mutex_static_lock
);
156 init_static_private(struct pthread
*thread
, pthread_mutex_t
*mutex
)
160 THR_LOCK_ACQUIRE(thread
, &_mutex_static_lock
);
163 ret
= mutex_init(mutex
, NULL
, 1);
167 THR_LOCK_RELEASE(thread
, &_mutex_static_lock
);
173 _pthread_mutex_init(pthread_mutex_t
*mutex
,
174 const pthread_mutexattr_t
*mutex_attr
)
176 return mutex_init(mutex
, mutex_attr
, 1);
180 __pthread_mutex_init(pthread_mutex_t
*mutex
,
181 const pthread_mutexattr_t
*mutex_attr
)
183 return mutex_init(mutex
, mutex_attr
, 0);
187 _mutex_reinit(pthread_mutex_t
*mutex
)
189 _thr_umtx_init(&(*mutex
)->m_lock
);
190 TAILQ_INIT(&(*mutex
)->m_queue
);
191 MUTEX_INIT_LINK(*mutex
);
192 (*mutex
)->m_owner
= NULL
;
193 (*mutex
)->m_count
= 0;
194 (*mutex
)->m_refcount
= 0;
195 (*mutex
)->m_prio
= 0;
196 (*mutex
)->m_saved_prio
= 0;
201 _mutex_fork(struct pthread
*curthread
)
203 struct pthread_mutex
*m
;
205 TAILQ_FOREACH(m
, &curthread
->mutexq
, m_qe
)
206 m
->m_lock
= UMTX_LOCKED
;
210 _pthread_mutex_destroy(pthread_mutex_t
*mutex
)
212 struct pthread
*curthread
= tls_get_curthread();
216 if (mutex
== NULL
|| *mutex
== NULL
)
220 * Try to lock the mutex structure, we only need to
221 * try once, if failed, the mutex is in used.
223 ret
= THR_UMTX_TRYLOCK(curthread
, &(*mutex
)->m_lock
);
228 * Check mutex other fields to see if this mutex is
229 * in use. Mostly for prority mutex types, or there
230 * are condition variables referencing it.
232 if (((*mutex
)->m_owner
!= NULL
) ||
233 (TAILQ_FIRST(&(*mutex
)->m_queue
) != NULL
) ||
234 ((*mutex
)->m_refcount
!= 0)) {
235 THR_UMTX_UNLOCK(curthread
, &(*mutex
)->m_lock
);
239 * Save a pointer to the mutex so it can be free'd
240 * and set the caller's pointer to NULL:
245 /* Unlock the mutex structure: */
246 THR_UMTX_UNLOCK(curthread
, &m
->m_lock
);
249 * Free the memory allocated for the mutex
252 MUTEX_ASSERT_NOT_OWNED(m
);
257 /* Return the completion status: */
262 mutex_trylock_common(struct pthread
*curthread
, pthread_mutex_t
*mutex
)
264 struct pthread_mutex
*m
;
268 ret
= THR_UMTX_TRYLOCK(curthread
, &m
->m_lock
);
270 m
->m_owner
= curthread
;
271 /* Add to the list of owned mutexes: */
272 MUTEX_ASSERT_NOT_OWNED(m
);
273 TAILQ_INSERT_TAIL(&curthread
->mutexq
,
275 } else if (m
->m_owner
== curthread
) {
276 ret
= mutex_self_trylock(m
);
283 __pthread_mutex_trylock(pthread_mutex_t
*m
)
285 struct pthread
*curthread
= tls_get_curthread();
288 if (__predict_false(m
== NULL
))
291 * If the mutex is statically initialized, perform the dynamic
294 if (__predict_false(*m
== NULL
)) {
295 ret
= init_static(curthread
, m
);
296 if (__predict_false(ret
!= 0))
299 return (mutex_trylock_common(curthread
, m
));
303 _pthread_mutex_trylock(pthread_mutex_t
*m
)
305 struct pthread
*curthread
= tls_get_curthread();
309 * If the mutex is statically initialized, perform the dynamic
310 * initialization marking the mutex private (delete safe):
312 if (__predict_false(*m
== NULL
)) {
313 ret
= init_static_private(curthread
, m
);
314 if (__predict_false(ret
!= 0))
317 return (mutex_trylock_common(curthread
, m
));
321 mutex_lock_common(struct pthread
*curthread
, pthread_mutex_t
*mutex
,
322 const struct timespec
* abstime
)
324 struct timespec ts
, ts2
;
325 struct pthread_mutex
*m
;
329 ret
= THR_UMTX_TRYLOCK(curthread
, &m
->m_lock
);
331 m
->m_owner
= curthread
;
332 /* Add to the list of owned mutexes: */
333 MUTEX_ASSERT_NOT_OWNED(m
);
334 TAILQ_INSERT_TAIL(&curthread
->mutexq
,
336 } else if (m
->m_owner
== curthread
) {
337 ret
= mutex_self_lock(m
, abstime
);
339 if (abstime
== NULL
) {
340 THR_UMTX_LOCK(curthread
, &m
->m_lock
);
342 } else if (__predict_false(
343 abstime
->tv_sec
< 0 || abstime
->tv_nsec
< 0 ||
344 abstime
->tv_nsec
>= 1000000000)) {
347 clock_gettime(CLOCK_REALTIME
, &ts
);
348 TIMESPEC_SUB(&ts2
, abstime
, &ts
);
349 ret
= THR_UMTX_TIMEDLOCK(curthread
,
352 * Timed out wait is not restarted if
353 * it was interrupted, not worth to do it.
359 m
->m_owner
= curthread
;
360 /* Add to the list of owned mutexes: */
361 MUTEX_ASSERT_NOT_OWNED(m
);
362 TAILQ_INSERT_TAIL(&curthread
->mutexq
,
370 __pthread_mutex_lock(pthread_mutex_t
*m
)
372 struct pthread
*curthread
;
377 if (__predict_false(m
== NULL
))
381 * If the mutex is statically initialized, perform the dynamic
384 curthread
= tls_get_curthread();
385 if (__predict_false(*m
== NULL
)) {
386 ret
= init_static(curthread
, m
);
387 if (__predict_false(ret
))
390 return (mutex_lock_common(curthread
, m
, NULL
));
394 _pthread_mutex_lock(pthread_mutex_t
*m
)
396 struct pthread
*curthread
;
401 if (__predict_false(m
== NULL
))
405 * If the mutex is statically initialized, perform the dynamic
406 * initialization marking it private (delete safe):
408 curthread
= tls_get_curthread();
409 if (__predict_false(*m
== NULL
)) {
410 ret
= init_static_private(curthread
, m
);
411 if (__predict_false(ret
))
414 return (mutex_lock_common(curthread
, m
, NULL
));
418 __pthread_mutex_timedlock(pthread_mutex_t
*m
,
419 const struct timespec
*abs_timeout
)
421 struct pthread
*curthread
;
426 if (__predict_false(m
== NULL
))
430 * If the mutex is statically initialized, perform the dynamic
433 curthread
= tls_get_curthread();
434 if (__predict_false(*m
== NULL
)) {
435 ret
= init_static(curthread
, m
);
436 if (__predict_false(ret
))
439 return (mutex_lock_common(curthread
, m
, abs_timeout
));
443 _pthread_mutex_timedlock(pthread_mutex_t
*m
,
444 const struct timespec
*abs_timeout
)
446 struct pthread
*curthread
;
451 if (__predict_false(m
== NULL
))
454 curthread
= tls_get_curthread();
457 * If the mutex is statically initialized, perform the dynamic
458 * initialization marking it private (delete safe):
460 if (__predict_false(*m
== NULL
)) {
461 ret
= init_static_private(curthread
, m
);
462 if (__predict_false(ret
))
465 return (mutex_lock_common(curthread
, m
, abs_timeout
));
469 _pthread_mutex_unlock(pthread_mutex_t
*m
)
471 if (__predict_false(m
== NULL
))
473 return (mutex_unlock_common(m
));
477 mutex_self_trylock(pthread_mutex_t m
)
482 /* case PTHREAD_MUTEX_DEFAULT: */
483 case PTHREAD_MUTEX_ERRORCHECK
:
484 case PTHREAD_MUTEX_NORMAL
:
488 case PTHREAD_MUTEX_RECURSIVE
:
489 /* Increment the lock count: */
490 if (m
->m_count
+ 1 > 0) {
498 /* Trap invalid mutex types; */
506 mutex_self_lock(pthread_mutex_t m
, const struct timespec
*abstime
)
508 struct timespec ts1
, ts2
;
512 /* case PTHREAD_MUTEX_DEFAULT: */
513 case PTHREAD_MUTEX_ERRORCHECK
:
515 clock_gettime(CLOCK_REALTIME
, &ts1
);
516 TIMESPEC_SUB(&ts2
, abstime
, &ts1
);
517 __sys_nanosleep(&ts2
, NULL
);
521 * POSIX specifies that mutexes should return
522 * EDEADLK if a recursive lock is detected.
528 case PTHREAD_MUTEX_NORMAL
:
530 * What SS2 define as a 'normal' mutex. Intentionally
531 * deadlock on attempts to get a lock you already own.
535 clock_gettime(CLOCK_REALTIME
, &ts1
);
536 TIMESPEC_SUB(&ts2
, abstime
, &ts1
);
537 __sys_nanosleep(&ts2
, NULL
);
543 __sys_nanosleep(&ts1
, NULL
);
547 case PTHREAD_MUTEX_RECURSIVE
:
548 /* Increment the lock count: */
549 if (m
->m_count
+ 1 > 0) {
557 /* Trap invalid mutex types; */
565 mutex_unlock_common(pthread_mutex_t
*mutex
)
567 struct pthread
*curthread
= tls_get_curthread();
568 struct pthread_mutex
*m
;
570 if (__predict_false((m
= *mutex
)== NULL
))
572 if (__predict_false(m
->m_owner
!= curthread
))
576 m
->m_type
== PTHREAD_MUTEX_RECURSIVE
&&
581 * Clear the count in case this is a recursive mutex.
585 /* Remove the mutex from the threads queue. */
586 MUTEX_ASSERT_IS_OWNED(m
);
587 TAILQ_REMOVE(&curthread
->mutexq
, m
, m_qe
);
590 * Hand off the mutex to the next waiting thread.
592 THR_UMTX_UNLOCK(curthread
, &m
->m_lock
);
598 _mutex_cv_lock(pthread_mutex_t
*m
, int count
)
602 if ((ret
= _pthread_mutex_lock(m
)) == 0) {
604 (*m
)->m_count
+= count
;
610 _mutex_cv_unlock(pthread_mutex_t
*mutex
, int *count
)
612 struct pthread
*curthread
= tls_get_curthread();
613 struct pthread_mutex
*m
;
615 if (__predict_false(mutex
== NULL
))
617 if (__predict_false((m
= *mutex
) == NULL
))
619 if (__predict_false(m
->m_owner
!= curthread
))
626 /* Remove the mutex from the threads queue. */
627 MUTEX_ASSERT_IS_OWNED(m
);
628 TAILQ_REMOVE(&curthread
->mutexq
, m
, m_qe
);
630 THR_UMTX_UNLOCK(curthread
, &m
->m_lock
);
635 _mutex_unlock_private(pthread_t pthread
)
637 struct pthread_mutex
*m
, *m_next
;
639 for (m
= TAILQ_FIRST(&pthread
->mutexq
); m
!= NULL
; m
= m_next
) {
640 m_next
= TAILQ_NEXT(m
, m_qe
);
641 if ((m
->m_flags
& MUTEX_FLAGS_PRIVATE
) != 0)
642 _pthread_mutex_unlock(&m
);
__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);