2 * Wrappers around mutex/cond/thread functions
4 * Copyright Red Hat, Inc. 2009
7 * Marcelo Tosatti <mtosatti@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
24 #include <sys/syscall.h>
25 #include <linux/futex.h>
27 #include "qemu/thread.h"
28 #include "qemu/atomic.h"
/* Report a fatal error from a pthread-style API (err is an errno code,
 * msg identifies the failing wrapper) and terminate the process. */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
36 void qemu_mutex_init(QemuMutex
*mutex
)
39 pthread_mutexattr_t mutexattr
;
41 pthread_mutexattr_init(&mutexattr
);
42 pthread_mutexattr_settype(&mutexattr
, PTHREAD_MUTEX_ERRORCHECK
);
43 err
= pthread_mutex_init(&mutex
->lock
, &mutexattr
);
44 pthread_mutexattr_destroy(&mutexattr
);
46 error_exit(err
, __func__
);
49 void qemu_mutex_destroy(QemuMutex
*mutex
)
53 err
= pthread_mutex_destroy(&mutex
->lock
);
55 error_exit(err
, __func__
);
58 void qemu_mutex_lock(QemuMutex
*mutex
)
62 err
= pthread_mutex_lock(&mutex
->lock
);
64 error_exit(err
, __func__
);
67 int qemu_mutex_trylock(QemuMutex
*mutex
)
69 return pthread_mutex_trylock(&mutex
->lock
);
72 void qemu_mutex_unlock(QemuMutex
*mutex
)
76 err
= pthread_mutex_unlock(&mutex
->lock
);
78 error_exit(err
, __func__
);
81 void qemu_cond_init(QemuCond
*cond
)
85 err
= pthread_cond_init(&cond
->cond
, NULL
);
87 error_exit(err
, __func__
);
90 void qemu_cond_destroy(QemuCond
*cond
)
94 err
= pthread_cond_destroy(&cond
->cond
);
96 error_exit(err
, __func__
);
99 void qemu_cond_signal(QemuCond
*cond
)
103 err
= pthread_cond_signal(&cond
->cond
);
105 error_exit(err
, __func__
);
108 void qemu_cond_broadcast(QemuCond
*cond
)
112 err
= pthread_cond_broadcast(&cond
->cond
);
114 error_exit(err
, __func__
);
117 void qemu_cond_wait(QemuCond
*cond
, QemuMutex
*mutex
)
121 err
= pthread_cond_wait(&cond
->cond
, &mutex
->lock
);
123 error_exit(err
, __func__
);
126 void qemu_sem_init(QemuSemaphore
*sem
, int init
)
130 #if defined(__APPLE__) || defined(__NetBSD__)
131 rc
= pthread_mutex_init(&sem
->lock
, NULL
);
133 error_exit(rc
, __func__
);
135 rc
= pthread_cond_init(&sem
->cond
, NULL
);
137 error_exit(rc
, __func__
);
140 error_exit(EINVAL
, __func__
);
144 rc
= sem_init(&sem
->sem
, 0, init
);
146 error_exit(errno
, __func__
);
151 void qemu_sem_destroy(QemuSemaphore
*sem
)
155 #if defined(__APPLE__) || defined(__NetBSD__)
156 rc
= pthread_cond_destroy(&sem
->cond
);
158 error_exit(rc
, __func__
);
160 rc
= pthread_mutex_destroy(&sem
->lock
);
162 error_exit(rc
, __func__
);
165 rc
= sem_destroy(&sem
->sem
);
167 error_exit(errno
, __func__
);
172 void qemu_sem_post(QemuSemaphore
*sem
)
176 #if defined(__APPLE__) || defined(__NetBSD__)
177 pthread_mutex_lock(&sem
->lock
);
178 if (sem
->count
== UINT_MAX
) {
182 rc
= pthread_cond_signal(&sem
->cond
);
184 pthread_mutex_unlock(&sem
->lock
);
186 error_exit(rc
, __func__
);
189 rc
= sem_post(&sem
->sem
);
191 error_exit(errno
, __func__
);
/* Fill *ts with the absolute wall-clock time @ms milliseconds from now,
 * normalizing tv_nsec into [0, 1e9). Used to build deadlines for
 * pthread_cond_timedwait/sem_timedwait. */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = now.tv_sec + ms / 1000;
    /* Carry at most once: both addends above are below one second each. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}
208 int qemu_sem_timedwait(QemuSemaphore
*sem
, int ms
)
213 #if defined(__APPLE__) || defined(__NetBSD__)
215 compute_abs_deadline(&ts
, ms
);
216 pthread_mutex_lock(&sem
->lock
);
217 while (sem
->count
== 0) {
218 rc
= pthread_cond_timedwait(&sem
->cond
, &sem
->lock
, &ts
);
219 if (rc
== ETIMEDOUT
) {
223 error_exit(rc
, __func__
);
226 if (rc
!= ETIMEDOUT
) {
229 pthread_mutex_unlock(&sem
->lock
);
230 return (rc
== ETIMEDOUT
? -1 : 0);
233 /* This is cheaper than sem_timedwait. */
235 rc
= sem_trywait(&sem
->sem
);
236 } while (rc
== -1 && errno
== EINTR
);
237 if (rc
== -1 && errno
== EAGAIN
) {
241 compute_abs_deadline(&ts
, ms
);
243 rc
= sem_timedwait(&sem
->sem
, &ts
);
244 } while (rc
== -1 && errno
== EINTR
);
245 if (rc
== -1 && errno
== ETIMEDOUT
) {
250 error_exit(errno
, __func__
);
256 void qemu_sem_wait(QemuSemaphore
*sem
)
260 #if defined(__APPLE__) || defined(__NetBSD__)
261 pthread_mutex_lock(&sem
->lock
);
262 while (sem
->count
== 0) {
263 rc
= pthread_cond_wait(&sem
->cond
, &sem
->lock
);
265 error_exit(rc
, __func__
);
269 pthread_mutex_unlock(&sem
->lock
);
272 rc
= sem_wait(&sem
->sem
);
273 } while (rc
== -1 && errno
== EINTR
);
275 error_exit(errno
, __func__
);
281 #define futex(...) syscall(__NR_futex, __VA_ARGS__)
283 static inline void futex_wake(QemuEvent
*ev
, int n
)
285 futex(ev
, FUTEX_WAKE
, n
, NULL
, NULL
, 0);
288 static inline void futex_wait(QemuEvent
*ev
, unsigned val
)
290 futex(ev
, FUTEX_WAIT
, (int) val
, NULL
, NULL
, 0);
293 static inline void futex_wake(QemuEvent
*ev
, int n
)
296 pthread_cond_signal(&ev
->cond
);
298 pthread_cond_broadcast(&ev
->cond
);
302 static inline void futex_wait(QemuEvent
*ev
, unsigned val
)
304 pthread_mutex_lock(&ev
->lock
);
305 if (ev
->value
== val
) {
306 pthread_cond_wait(&ev
->cond
, &ev
->lock
);
308 pthread_mutex_unlock(&ev
->lock
);
312 /* Valid transitions:
313 * - free->set, when setting the event
314 * - busy->set, when setting the event, followed by futex_wake
315 * - set->free, when resetting the event
316 * - free->busy, when waiting
318 * set->busy does not happen (it can be observed from the outside but
319 * it really is set->free->busy).
321 * busy->free provably cannot happen; to enforce it, the set->free transition
322 * is done with an OR, which becomes a no-op if the event has concurrently
323 * transitioned to free or busy.
330 void qemu_event_init(QemuEvent
*ev
, bool init
)
333 pthread_mutex_init(&ev
->lock
, NULL
);
334 pthread_cond_init(&ev
->cond
, NULL
);
337 ev
->value
= (init
? EV_SET
: EV_FREE
);
340 void qemu_event_destroy(QemuEvent
*ev
)
343 pthread_mutex_destroy(&ev
->lock
);
344 pthread_cond_destroy(&ev
->cond
);
348 void qemu_event_set(QemuEvent
*ev
)
350 if (atomic_mb_read(&ev
->value
) != EV_SET
) {
351 if (atomic_xchg(&ev
->value
, EV_SET
) == EV_BUSY
) {
352 /* There were waiters, wake them up. */
353 futex_wake(ev
, INT_MAX
);
358 void qemu_event_reset(QemuEvent
*ev
)
360 if (atomic_mb_read(&ev
->value
) == EV_SET
) {
362 * If there was a concurrent reset (or even reset+wait),
363 * do nothing. Otherwise change EV_SET->EV_FREE.
365 atomic_or(&ev
->value
, EV_FREE
);
369 void qemu_event_wait(QemuEvent
*ev
)
373 value
= atomic_mb_read(&ev
->value
);
374 if (value
!= EV_SET
) {
375 if (value
== EV_FREE
) {
377 * Leave the event reset and tell qemu_event_set that there
378 * are waiters. No need to retry, because there cannot be
379 * a concurent busy->free transition. After the CAS, the
380 * event will be either set or busy.
382 if (atomic_cmpxchg(&ev
->value
, EV_FREE
, EV_BUSY
) == EV_SET
) {
386 futex_wait(ev
, EV_BUSY
);
391 void qemu_thread_create(QemuThread
*thread
,
392 void *(*start_routine
)(void*),
395 sigset_t set
, oldset
;
399 err
= pthread_attr_init(&attr
);
401 error_exit(err
, __func__
);
403 if (mode
== QEMU_THREAD_DETACHED
) {
404 err
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
406 error_exit(err
, __func__
);
410 /* Leave signal handling to the iothread. */
412 pthread_sigmask(SIG_SETMASK
, &set
, &oldset
);
413 err
= pthread_create(&thread
->thread
, &attr
, start_routine
, arg
);
415 error_exit(err
, __func__
);
417 pthread_sigmask(SIG_SETMASK
, &oldset
, NULL
);
419 pthread_attr_destroy(&attr
);
422 void qemu_thread_get_self(QemuThread
*thread
)
424 thread
->thread
= pthread_self();
427 bool qemu_thread_is_self(QemuThread
*thread
)
429 return pthread_equal(pthread_self(), thread
->thread
);
/* Terminate the calling thread; @retval is delivered to qemu_thread_join. */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
437 void *qemu_thread_join(QemuThread
*thread
)
442 err
= pthread_join(thread
->thread
, &ret
);
444 error_exit(err
, __func__
);