/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}
void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}
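/*
 * A minimal usage sketch of the wrappers above (illustrative only; the
 * names queue_lock, queue_ready and queue_empty() are hypothetical, and
 * both objects are assumed to have been initialized elsewhere with
 * qemu_mutex_init() and qemu_cond_init()). The mutex must be held around
 * qemu_cond_wait(), and the predicate is rechecked in a loop because
 * condition variables allow spurious wakeups:
 *
 *     static QemuMutex queue_lock;
 *     static QemuCond queue_ready;
 *
 *     void consumer(void)
 *     {
 *         qemu_mutex_lock(&queue_lock);
 *         while (queue_empty()) {
 *             qemu_cond_wait(&queue_ready, &queue_lock);
 *         }
 *         // ... pop an element ...
 *         qemu_mutex_unlock(&queue_lock);
 *     }
 */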
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    /* Fall back to a mutex + condition variable + counter on hosts
     * without usable unnamed POSIX semaphores.
     */
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}
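/*
 * Worked example (illustrative): with ms == 2500 and gettimeofday()
 * returning tv = { tv_sec = 100, tv_usec = 600000 }, the code above first
 * computes tv_nsec = 600000 * 1000 + 500 * 1000000 = 1100000000 and
 * tv_sec = 102; the overflow branch then normalizes this to
 * ts = { 103, 100000000 }, i.e. 2.5 seconds after the current time.
 */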
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
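/*
 * A minimal usage sketch (illustrative only; the work_done name is
 * hypothetical): qemu_sem_timedwait() returns 0 when the semaphore was
 * decremented and -1 on timeout, so a caller can wait with a bound.
 *
 *     static QemuSemaphore work_done;
 *
 *     qemu_sem_init(&work_done, 0);
 *     // ... a worker thread eventually calls qemu_sem_post(&work_done) ...
 *     if (qemu_sem_timedwait(&work_done, 1000) < 0) {
 *         // timed out after 1000 ms
 *     }
 */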
#ifdef __linux__
#define futex(...) syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
        switch (errno) {
        case EWOULDBLOCK:
            return;
        case EINTR:
            break; /* get out of switch and retry */
        default:
            abort();
        }
    }
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1
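/*
 * Illustration of the OR trick: with EV_SET == 0, EV_FREE == 1 and
 * EV_BUSY == -1 (all bits set), ev->value | EV_FREE turns EV_SET into
 * EV_FREE, but is a no-op on EV_FREE (1 | 1 == 1) and on EV_BUSY
 * (-1 | 1 == -1), so a reset can never clobber a concurrent free->busy
 * transition made by a waiter.
 */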
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}
void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}
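/*
 * A minimal usage sketch of the event primitive (illustrative only; the
 * init_done name is hypothetical):
 *
 *     static QemuEvent init_done;
 *
 *     qemu_event_init(&init_done, false);
 *     // waiter thread:
 *     qemu_event_wait(&init_done);    // blocks until the event is set
 *     // initializer thread:
 *     qemu_event_set(&init_done);     // wakes all current waiters
 *     // later, to reuse the event:
 *     qemu_event_reset(&init_done);
 */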
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}
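/*
 * A minimal usage sketch (illustrative only; my_exit_notify and
 * my_exit_notifier are hypothetical): a thread registers a Notifier, and
 * the pthread key destructor runs it when that thread exits.
 *
 *     static void my_exit_notify(Notifier *n, void *data)
 *     {
 *         // per-thread cleanup
 *     }
 *
 *     static __thread Notifier my_exit_notifier;
 *
 *     my_exit_notifier.notify = my_exit_notify;
 *     qemu_thread_atexit_add(&my_exit_notifier);
 */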
/* Attempt to set the thread's name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    pthread_setname_np(thread->thread, name);
#endif
}
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread. */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err) {
        error_exit(err, __func__);
    }

    if (name_threads) {
        qemu_thread_set_name(thread, name);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
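/*
 * A minimal usage sketch (illustrative only; worker_fn and the variable
 * names are hypothetical). A joinable thread must be reaped with
 * qemu_thread_join(); QEMU_THREAD_DETACHED threads clean up on their own.
 *
 *     static void *worker_fn(void *opaque)
 *     {
 *         return opaque;
 *     }
 *
 *     QemuThread worker;
 *     qemu_thread_create(&worker, "worker", worker_fn, NULL,
 *                        QEMU_THREAD_JOINABLE);
 *     qemu_thread_join(&worker);
 */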
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}