/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
#include "trace.h"
static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}
void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    mutex->initialized = false;
    err = pthread_mutex_destroy(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    trace_qemu_mutex_lock(mutex, file, line);

    err = pthread_mutex_lock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }

    trace_qemu_mutex_locked(mutex, file, line);
}
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_trylock(&mutex->lock);
    if (err == 0) {
        trace_qemu_mutex_locked(mutex, file, line);
        return 0;
    }
    if (err != EBUSY) {
        error_exit(err, __func__);
    }
    return -EBUSY;
}
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_unlock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }

    trace_qemu_mutex_unlock(mutex, file, line);
}
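
/*
 * Illustrative sketch (not part of the original file): typical use of the
 * mutex wrappers above.  Callers normally go through the qemu_mutex_lock()
 * and qemu_mutex_unlock() macros from "qemu/thread.h", which pass __FILE__
 * and __LINE__ down to the *_impl functions for the trace points.  The
 * counter and function names here are invented for the example.
 */
#if 0
static QemuMutex example_lock;
static unsigned long example_counter;

static void example_counter_inc(void)
{
    qemu_mutex_lock(&example_lock);      /* ends up in qemu_mutex_lock_impl() */
    example_counter++;
    qemu_mutex_unlock(&example_lock);    /* ends up in qemu_mutex_unlock_impl() */
}
#endif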
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}
void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err) {
        error_exit(err, __func__);
    }
    cond->initialized = true;
}
void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    cond->initialized = false;
    err = pthread_cond_destroy(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}
void qemu_cond_signal(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_signal(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}
void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_broadcast(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    trace_qemu_mutex_locked(mutex, file, line);
    if (err) {
        error_exit(err, __func__);
    }
}
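
/*
 * Illustrative sketch (not part of the original file): the usual
 * wait-in-a-loop pattern with QemuMutex + QemuCond.  Waiters call
 * qemu_cond_wait() (a macro from "qemu/thread.h" that ends up in
 * qemu_cond_wait_impl() above) with the mutex held; producers change the
 * predicate under the same mutex and signal.  All names below are invented
 * for the example.
 */
#if 0
static QemuMutex example_lock;
static QemuCond example_cond;
static bool example_ready;

static void example_wait_until_ready(void)
{
    qemu_mutex_lock(&example_lock);
    while (!example_ready) {
        /* Re-check in a loop to cope with spurious wakeups. */
        qemu_cond_wait(&example_cond, &example_lock);
    }
    qemu_mutex_unlock(&example_lock);
}

static void example_mark_ready(void)
{
    qemu_mutex_lock(&example_lock);
    example_ready = true;
    qemu_cond_signal(&example_cond);
    qemu_mutex_unlock(&example_lock);
}
#endif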
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}
void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
    sem->initialized = false;
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}
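
/*
 * Worked example (illustrative, not from the original file): with
 * ms = 1500 and gettimeofday() returning tv_sec = 10, tv_usec = 600000,
 * the code above computes
 *   tv_nsec = 600000 * 1000 + 500 * 1000000 = 1100000000
 *   tv_sec  = 10 + 1 = 11
 * and, because tv_nsec >= 1000000000, normalizes this to
 *   tv_sec = 12, tv_nsec = 100000000
 * i.e. an absolute deadline 1.5 s after the current time.
 */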
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
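
/*
 * Illustrative sketch (not part of the original file): using QemuSemaphore
 * to hand work items from a producer to a consumer thread.  Only
 * qemu_sem_init(), qemu_sem_post() and qemu_sem_timedwait() are the real
 * wrappers defined above; everything else is invented for the example.
 */
#if 0
static QemuSemaphore example_sem;          /* initialized once at startup */

static void example_setup(void)
{
    qemu_sem_init(&example_sem, 0);        /* start with no pending items */
}

static void example_producer(void)
{
    /* ... queue a work item somewhere ... */
    qemu_sem_post(&example_sem);           /* wake one waiting consumer */
}

static void example_consumer(void)
{
    /* wait up to 100 ms for a post; returns -1 on timeout, 0 otherwise */
    if (qemu_sem_timedwait(&example_sem, 100) == 0) {
        /* ... dequeue and process one item ... */
    }
}
#endif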
#ifdef __linux__
#include "qemu/futex.h"
#else
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}
void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}
void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}
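
/*
 * Illustrative sketch (not part of the original file): QemuEvent used as a
 * one-shot "latch" that one thread sets and others wait on.  The names
 * below are invented for the example.
 */
#if 0
static QemuEvent example_done_ev;

static void example_init(void)
{
    qemu_event_init(&example_done_ev, false);   /* start in the "free" state */
}

static void example_worker_finish(void)
{
    qemu_event_set(&example_done_ev);           /* free/busy -> set, wakes waiters */
}

static void example_wait_for_worker(void)
{
    qemu_event_wait(&example_done_ev);          /* returns once the event is set */
    qemu_event_reset(&example_done_ev);         /* set -> free, ready for reuse */
}
#endif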
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));
void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}
void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}
static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}
static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}
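
/*
 * Illustrative sketch (not part of the original file): registering a
 * per-thread exit notifier.  The notifier list head lives in a
 * pthread-specific key, so each thread must register its own Notifier
 * object; thread-local storage is used here for that reason.  All names
 * are invented for the example.
 */
#if 0
static __thread Notifier example_exit_notifier;

static void example_on_thread_exit(Notifier *n, void *unused)
{
    /* ... release per-thread resources ... */
}

static void example_register_exit_hook(void)
{
    example_exit_notifier.notify = example_on_thread_exit;
    qemu_thread_atexit_add(&example_exit_notifier);
}
#endif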
typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;
} QemuThreadArgs;

static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;

#ifdef CONFIG_PTHREAD_SETNAME_NP
    /* Attempt to set the thread's name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    if (name_threads && qemu_thread_args->name) {
        pthread_setname_np(pthread_self(), qemu_thread_args->name);
    }
#endif
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);
    return start_routine(arg);
}
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;
    QemuThreadArgs *qemu_thread_args;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread. */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

    qemu_thread_args = g_new0(QemuThreadArgs, 1);
    qemu_thread_args->name = g_strdup(name);
    qemu_thread_args->start_routine = start_routine;
    qemu_thread_args->arg = arg;

    err = pthread_create(&thread->thread, &attr,
                         qemu_thread_start, qemu_thread_args);

    if (err) {
        error_exit(err, __func__);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}
bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}
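
/*
 * Illustrative sketch (not part of the original file): creating a joinable
 * worker thread with the wrappers above and waiting for it to finish.  The
 * worker function and its argument are invented for the example.
 */
#if 0
static void *example_worker(void *opaque)
{
    int *input = opaque;
    /* ... do some work with *input ... */
    return NULL;
}

static void example_run_worker(void)
{
    QemuThread thread;
    static int input = 42;

    qemu_thread_create(&thread, "example-worker", example_worker,
                       &input, QEMU_THREAD_JOINABLE);
    /* ... do other work concurrently ... */
    qemu_thread_join(&thread);                /* wait for example_worker */
}
#endif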