/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.15 2008/05/09 16:03:27 dillon Exp $
 */

#include "namespace.h"
#include <machine/tls.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)

umtx_t	_mutex_static_lock;

/*
 * Prototypes
 */
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

int	__pthread_mutex_init(pthread_mutex_t *mutex,
	    const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
	    const struct timespec *abs_timeout);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
	    malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}

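/*
 * Example (illustrative sketch, compiled out, not part of this file):
 * how an application would exercise mutex_init() through the public
 * pthread API.  The recursive type routes repeated locks by the owner
 * through mutex_self_lock()/mutex_self_trylock() below.
 */
#if 0
#include <pthread.h>

static void
example_recursive_mutex(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	if (pthread_mutex_init(&m, &attr) == 0) {
		pthread_mutex_lock(&m);		/* takes m_lock, m_count 0 */
		pthread_mutex_lock(&m);		/* self lock: m_count -> 1 */
		pthread_mutex_unlock(&m);	/* m_count -> 0 */
		pthread_mutex_unlock(&m);	/* releases m_lock */
		pthread_mutex_destroy(&m);
	}
	pthread_mutexattr_destroy(&attr);
}
#endif
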
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

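/*
 * Example (illustrative sketch, compiled out): a statically initialized
 * mutex reaches init_static()/init_static_private() on its first lock
 * attempt.  This assumes PTHREAD_MUTEX_INITIALIZER leaves the mutex
 * pointer NULL in this library's headers; two threads racing the first
 * lock are serialized by _mutex_static_lock above.
 */
#if 0
static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_static_init(void)
{
	pthread_mutex_lock(&example_lock);	/* allocates on first use */
	pthread_mutex_unlock(&example_lock);
}
#endif
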
int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Only the forking thread survives in the child; reset each
	 * mutex it owns to the plain locked state, since any recorded
	 * waiters no longer exist.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the other mutex fields to see if this mutex is
		 * in use.  This mostly matters for priority mutex types,
		 * or when condition variables are referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

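/*
 * Example (illustrative sketch, compiled out): destroying a held mutex
 * fails because the trylock above cannot take m_lock while it is owned.
 */
#if 0
static void
example_destroy_busy(pthread_mutex_t *mp)
{
	pthread_mutex_lock(mp);
	if (pthread_mutex_destroy(mp) != 0)	/* fails: mutex in use */
		pthread_mutex_unlock(mp);
	pthread_mutex_destroy(mp);		/* now succeeds */
}
#endif
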
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread *curthread = tls_get_curthread();
	int ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

int
_pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread *curthread = tls_get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

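/*
 * Example (illustrative sketch, compiled out): trylock never blocks.
 * A second trylock by the owner of a non-recursive mutex reports EBUSY
 * via mutex_self_trylock() rather than deadlocking.
 */
#if 0
#include <errno.h>

static void
example_trylock(void)
{
	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	int ret;

	if (pthread_mutex_trylock(&m) == 0) {
		ret = pthread_mutex_trylock(&m);	/* EBUSY */
		(void)ret;
		pthread_mutex_unlock(&m);
	}
}
#endif
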
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	int ret = 0;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread,
			    &m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		}
	}
	return (ret);
}

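/*
 * Example (illustrative sketch, compiled out): pthread_mutex_timedlock()
 * takes an absolute CLOCK_REALTIME deadline; mutex_lock_common() above
 * converts it into the relative timeout the umtx layer expects.
 */
#if 0
#include <time.h>

static int
example_timedlock(pthread_mutex_t *mp)
{
	struct timespec abstime;

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;	/* give up roughly two seconds from now */
	return (pthread_mutex_timedlock(mp, &abstime));	/* 0 or ETIMEDOUT */
}
#endif
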
int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	if (__predict_false(m == NULL))
		return (EINVAL);
	return (mutex_unlock_common(m));
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

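/*
 * Example (illustrative sketch, compiled out): an errorcheck mutex
 * reports EDEADLK on a recursive lock attempt instead of deadlocking,
 * per the ERRORCHECK case above.
 */
#if 0
static void
example_errorcheck(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutex_lock(&m);
	(void)pthread_mutex_lock(&m);	/* EDEADLK via mutex_self_lock() */
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
}
#endif
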
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		/*
		 * Clear the count in case this is a recursive mutex.
		 */
		m->m_count = 0;
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		MUTEX_INIT_LINK(m);
		/*
		 * Hand off the mutex to the next waiting thread.
		 */
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
	}
	return (0);
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	if ((ret = _pthread_mutex_lock(m)) == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false(mutex == NULL))
		return (EINVAL);
	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	*count = m->m_count;
	m->m_count = 0;
	m->m_refcount++;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	return (0);
}

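/*
 * Example (illustrative sketch, compiled out): the condition variable
 * implementation is the intended caller of _mutex_cv_unlock() and
 * _mutex_cv_lock().  A wait on a recursive mutex saves m_count across
 * the sleep and restores it on wakeup, roughly as follows (the sleep
 * itself is elided):
 */
#if 0
static void
example_cv_wait(pthread_mutex_t *mp)
{
	int count;

	_mutex_cv_unlock(mp, &count);	/* drop lock, save recursion count */
	/* ... block on the condition variable's sync queue ... */
	_mutex_cv_lock(mp, count);	/* relock, restore recursion count */
}
#endif
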
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);