2 * Wrappers around mutex/cond/thread functions
4 * Copyright Red Hat, Inc. 2009
7 * Marcelo Tosatti <mtosatti@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
24 #include <sys/syscall.h>
25 #include <linux/futex.h>
27 #include "qemu/thread.h"
28 #include "qemu/atomic.h"
/* When true, qemu_thread_create attaches a debug name to new threads.  */
static bool name_threads;

/* Enable or disable naming of newly created threads.  */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;
}
/*
 * Report a fatal pthread-layer failure and terminate.
 *
 * @err: errno-style error code returned by the failing call
 * @msg: context string, conventionally __func__ of the caller
 *
 * Callers invoke this only on unrecoverable errors and do not handle
 * the failure afterwards, so this function must not return; the visible
 * version only printed the message, which would let callers continue
 * past a fatal error.
 */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
43 void qemu_mutex_init(QemuMutex
*mutex
)
46 pthread_mutexattr_t mutexattr
;
48 pthread_mutexattr_init(&mutexattr
);
49 pthread_mutexattr_settype(&mutexattr
, PTHREAD_MUTEX_ERRORCHECK
);
50 err
= pthread_mutex_init(&mutex
->lock
, &mutexattr
);
51 pthread_mutexattr_destroy(&mutexattr
);
53 error_exit(err
, __func__
);
56 void qemu_mutex_destroy(QemuMutex
*mutex
)
60 err
= pthread_mutex_destroy(&mutex
->lock
);
62 error_exit(err
, __func__
);
65 void qemu_mutex_lock(QemuMutex
*mutex
)
69 err
= pthread_mutex_lock(&mutex
->lock
);
71 error_exit(err
, __func__
);
74 int qemu_mutex_trylock(QemuMutex
*mutex
)
76 return pthread_mutex_trylock(&mutex
->lock
);
79 void qemu_mutex_unlock(QemuMutex
*mutex
)
83 err
= pthread_mutex_unlock(&mutex
->lock
);
85 error_exit(err
, __func__
);
88 void qemu_cond_init(QemuCond
*cond
)
92 err
= pthread_cond_init(&cond
->cond
, NULL
);
94 error_exit(err
, __func__
);
97 void qemu_cond_destroy(QemuCond
*cond
)
101 err
= pthread_cond_destroy(&cond
->cond
);
103 error_exit(err
, __func__
);
106 void qemu_cond_signal(QemuCond
*cond
)
110 err
= pthread_cond_signal(&cond
->cond
);
112 error_exit(err
, __func__
);
115 void qemu_cond_broadcast(QemuCond
*cond
)
119 err
= pthread_cond_broadcast(&cond
->cond
);
121 error_exit(err
, __func__
);
124 void qemu_cond_wait(QemuCond
*cond
, QemuMutex
*mutex
)
128 err
= pthread_cond_wait(&cond
->cond
, &mutex
->lock
);
130 error_exit(err
, __func__
);
133 void qemu_sem_init(QemuSemaphore
*sem
, int init
)
137 #if defined(__APPLE__) || defined(__NetBSD__)
138 rc
= pthread_mutex_init(&sem
->lock
, NULL
);
140 error_exit(rc
, __func__
);
142 rc
= pthread_cond_init(&sem
->cond
, NULL
);
144 error_exit(rc
, __func__
);
147 error_exit(EINVAL
, __func__
);
151 rc
= sem_init(&sem
->sem
, 0, init
);
153 error_exit(errno
, __func__
);
158 void qemu_sem_destroy(QemuSemaphore
*sem
)
162 #if defined(__APPLE__) || defined(__NetBSD__)
163 rc
= pthread_cond_destroy(&sem
->cond
);
165 error_exit(rc
, __func__
);
167 rc
= pthread_mutex_destroy(&sem
->lock
);
169 error_exit(rc
, __func__
);
172 rc
= sem_destroy(&sem
->sem
);
174 error_exit(errno
, __func__
);
179 void qemu_sem_post(QemuSemaphore
*sem
)
183 #if defined(__APPLE__) || defined(__NetBSD__)
184 pthread_mutex_lock(&sem
->lock
);
185 if (sem
->count
== UINT_MAX
) {
189 rc
= pthread_cond_signal(&sem
->cond
);
191 pthread_mutex_unlock(&sem
->lock
);
193 error_exit(rc
, __func__
);
196 rc
= sem_post(&sem
->sem
);
198 error_exit(errno
, __func__
);
/*
 * Convert a relative timeout of @ms milliseconds into the absolute
 * CLOCK_REALTIME deadline expected by pthread_cond_timedwait and
 * sem_timedwait, storing it in *@ts.
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    /* Normalize: carry a whole second out of the nanosecond field.  */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}
215 int qemu_sem_timedwait(QemuSemaphore
*sem
, int ms
)
220 #if defined(__APPLE__) || defined(__NetBSD__)
222 compute_abs_deadline(&ts
, ms
);
223 pthread_mutex_lock(&sem
->lock
);
224 while (sem
->count
== 0) {
225 rc
= pthread_cond_timedwait(&sem
->cond
, &sem
->lock
, &ts
);
226 if (rc
== ETIMEDOUT
) {
230 error_exit(rc
, __func__
);
233 if (rc
!= ETIMEDOUT
) {
236 pthread_mutex_unlock(&sem
->lock
);
237 return (rc
== ETIMEDOUT
? -1 : 0);
240 /* This is cheaper than sem_timedwait. */
242 rc
= sem_trywait(&sem
->sem
);
243 } while (rc
== -1 && errno
== EINTR
);
244 if (rc
== -1 && errno
== EAGAIN
) {
248 compute_abs_deadline(&ts
, ms
);
250 rc
= sem_timedwait(&sem
->sem
, &ts
);
251 } while (rc
== -1 && errno
== EINTR
);
252 if (rc
== -1 && errno
== ETIMEDOUT
) {
257 error_exit(errno
, __func__
);
263 void qemu_sem_wait(QemuSemaphore
*sem
)
267 #if defined(__APPLE__) || defined(__NetBSD__)
268 pthread_mutex_lock(&sem
->lock
);
269 while (sem
->count
== 0) {
270 rc
= pthread_cond_wait(&sem
->cond
, &sem
->lock
);
272 error_exit(rc
, __func__
);
276 pthread_mutex_unlock(&sem
->lock
);
279 rc
= sem_wait(&sem
->sem
);
280 } while (rc
== -1 && errno
== EINTR
);
282 error_exit(errno
, __func__
);
288 #define futex(...) syscall(__NR_futex, __VA_ARGS__)
290 static inline void futex_wake(QemuEvent
*ev
, int n
)
292 futex(ev
, FUTEX_WAKE
, n
, NULL
, NULL
, 0);
295 static inline void futex_wait(QemuEvent
*ev
, unsigned val
)
297 futex(ev
, FUTEX_WAIT
, (int) val
, NULL
, NULL
, 0);
300 static inline void futex_wake(QemuEvent
*ev
, int n
)
303 pthread_cond_signal(&ev
->cond
);
305 pthread_cond_broadcast(&ev
->cond
);
309 static inline void futex_wait(QemuEvent
*ev
, unsigned val
)
311 pthread_mutex_lock(&ev
->lock
);
312 if (ev
->value
== val
) {
313 pthread_cond_wait(&ev
->cond
, &ev
->lock
);
315 pthread_mutex_unlock(&ev
->lock
);
319 /* Valid transitions:
320 * - free->set, when setting the event
321 * - busy->set, when setting the event, followed by futex_wake
322 * - set->free, when resetting the event
323 * - free->busy, when waiting
325 * set->busy does not happen (it can be observed from the outside but
326 * it really is set->free->busy).
328 * busy->free provably cannot happen; to enforce it, the set->free transition
329 * is done with an OR, which becomes a no-op if the event has concurrently
330 * transitioned to free or busy.
337 void qemu_event_init(QemuEvent
*ev
, bool init
)
340 pthread_mutex_init(&ev
->lock
, NULL
);
341 pthread_cond_init(&ev
->cond
, NULL
);
344 ev
->value
= (init
? EV_SET
: EV_FREE
);
347 void qemu_event_destroy(QemuEvent
*ev
)
350 pthread_mutex_destroy(&ev
->lock
);
351 pthread_cond_destroy(&ev
->cond
);
355 void qemu_event_set(QemuEvent
*ev
)
357 if (atomic_mb_read(&ev
->value
) != EV_SET
) {
358 if (atomic_xchg(&ev
->value
, EV_SET
) == EV_BUSY
) {
359 /* There were waiters, wake them up. */
360 futex_wake(ev
, INT_MAX
);
365 void qemu_event_reset(QemuEvent
*ev
)
367 if (atomic_mb_read(&ev
->value
) == EV_SET
) {
369 * If there was a concurrent reset (or even reset+wait),
370 * do nothing. Otherwise change EV_SET->EV_FREE.
372 atomic_or(&ev
->value
, EV_FREE
);
376 void qemu_event_wait(QemuEvent
*ev
)
380 value
= atomic_mb_read(&ev
->value
);
381 if (value
!= EV_SET
) {
382 if (value
== EV_FREE
) {
384 * Leave the event reset and tell qemu_event_set that there
385 * are waiters. No need to retry, because there cannot be
386 * a concurent busy->free transition. After the CAS, the
387 * event will be either set or busy.
389 if (atomic_cmpxchg(&ev
->value
, EV_FREE
, EV_BUSY
) == EV_SET
) {
393 futex_wait(ev
, EV_BUSY
);
397 void qemu_thread_create(QemuThread
*thread
, const char *name
,
398 void *(*start_routine
)(void*),
401 sigset_t set
, oldset
;
405 err
= pthread_attr_init(&attr
);
407 error_exit(err
, __func__
);
409 if (mode
== QEMU_THREAD_DETACHED
) {
410 err
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
412 error_exit(err
, __func__
);
416 /* Leave signal handling to the iothread. */
418 pthread_sigmask(SIG_SETMASK
, &set
, &oldset
);
419 err
= pthread_create(&thread
->thread
, &attr
, start_routine
, arg
);
421 error_exit(err
, __func__
);
423 #if defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 12))
425 pthread_setname_np(thread
->thread
, name
);
429 pthread_sigmask(SIG_SETMASK
, &oldset
, NULL
);
431 pthread_attr_destroy(&attr
);
434 void qemu_thread_get_self(QemuThread
*thread
)
436 thread
->thread
= pthread_self();
439 bool qemu_thread_is_self(QemuThread
*thread
)
441 return pthread_equal(pthread_self(), thread
->thread
);
/* Terminate the calling thread, making @retval available to joiners.  */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
449 void *qemu_thread_join(QemuThread
*thread
)
454 err
= pthread_join(thread
->thread
, &ret
);
456 error_exit(err
, __func__
);