/*
 * Imported from qemu.git, util/qemu-thread-posix.c
 * blob d05a6497e147dbb223df2249aefc21d01f356e95
 * (checkout of commit "block: mark AioContext as recursive")
 */
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
13 #include <stdlib.h>
14 #include <stdio.h>
15 #include <errno.h>
16 #include <time.h>
17 #include <signal.h>
18 #include <stdint.h>
19 #include <string.h>
20 #include <limits.h>
21 #include <unistd.h>
22 #include <sys/time.h>
23 #ifdef __linux__
24 #include <sys/syscall.h>
25 #include <linux/futex.h>
26 #endif
27 #include "qemu/thread.h"
28 #include "qemu/atomic.h"
/* When true, qemu_thread_create() tries to give each new thread a name. */
static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
/* Report a fatal error from a failing pthread/semaphore call and abort.
 * @err is a positive error number (as returned by pthread functions) or
 * an errno value; @msg is typically __func__.  Never returns.
 */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
50 void qemu_mutex_init(QemuMutex *mutex)
52 int err;
53 pthread_mutexattr_t mutexattr;
55 pthread_mutexattr_init(&mutexattr);
56 pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
57 err = pthread_mutex_init(&mutex->lock, &mutexattr);
58 pthread_mutexattr_destroy(&mutexattr);
59 if (err)
60 error_exit(err, __func__);
63 void qemu_mutex_destroy(QemuMutex *mutex)
65 int err;
67 err = pthread_mutex_destroy(&mutex->lock);
68 if (err)
69 error_exit(err, __func__);
72 void qemu_mutex_lock(QemuMutex *mutex)
74 int err;
76 err = pthread_mutex_lock(&mutex->lock);
77 if (err)
78 error_exit(err, __func__);
81 int qemu_mutex_trylock(QemuMutex *mutex)
83 return pthread_mutex_trylock(&mutex->lock);
86 void qemu_mutex_unlock(QemuMutex *mutex)
88 int err;
90 err = pthread_mutex_unlock(&mutex->lock);
91 if (err)
92 error_exit(err, __func__);
95 void qemu_cond_init(QemuCond *cond)
97 int err;
99 err = pthread_cond_init(&cond->cond, NULL);
100 if (err)
101 error_exit(err, __func__);
104 void qemu_cond_destroy(QemuCond *cond)
106 int err;
108 err = pthread_cond_destroy(&cond->cond);
109 if (err)
110 error_exit(err, __func__);
113 void qemu_cond_signal(QemuCond *cond)
115 int err;
117 err = pthread_cond_signal(&cond->cond);
118 if (err)
119 error_exit(err, __func__);
122 void qemu_cond_broadcast(QemuCond *cond)
124 int err;
126 err = pthread_cond_broadcast(&cond->cond);
127 if (err)
128 error_exit(err, __func__);
131 void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
133 int err;
135 err = pthread_cond_wait(&cond->cond, &mutex->lock);
136 if (err)
137 error_exit(err, __func__);
140 void qemu_sem_init(QemuSemaphore *sem, int init)
142 int rc;
144 #if defined(__APPLE__) || defined(__NetBSD__)
145 rc = pthread_mutex_init(&sem->lock, NULL);
146 if (rc != 0) {
147 error_exit(rc, __func__);
149 rc = pthread_cond_init(&sem->cond, NULL);
150 if (rc != 0) {
151 error_exit(rc, __func__);
153 if (init < 0) {
154 error_exit(EINVAL, __func__);
156 sem->count = init;
157 #else
158 rc = sem_init(&sem->sem, 0, init);
159 if (rc < 0) {
160 error_exit(errno, __func__);
162 #endif
165 void qemu_sem_destroy(QemuSemaphore *sem)
167 int rc;
169 #if defined(__APPLE__) || defined(__NetBSD__)
170 rc = pthread_cond_destroy(&sem->cond);
171 if (rc < 0) {
172 error_exit(rc, __func__);
174 rc = pthread_mutex_destroy(&sem->lock);
175 if (rc < 0) {
176 error_exit(rc, __func__);
178 #else
179 rc = sem_destroy(&sem->sem);
180 if (rc < 0) {
181 error_exit(errno, __func__);
183 #endif
186 void qemu_sem_post(QemuSemaphore *sem)
188 int rc;
190 #if defined(__APPLE__) || defined(__NetBSD__)
191 pthread_mutex_lock(&sem->lock);
192 if (sem->count == UINT_MAX) {
193 rc = EINVAL;
194 } else {
195 sem->count++;
196 rc = pthread_cond_signal(&sem->cond);
198 pthread_mutex_unlock(&sem->lock);
199 if (rc != 0) {
200 error_exit(rc, __func__);
202 #else
203 rc = sem_post(&sem->sem);
204 if (rc < 0) {
205 error_exit(errno, __func__);
207 #endif
/* Convert a relative timeout of @ms milliseconds into an absolute
 * CLOCK_REALTIME deadline in *@ts, as expected by pthread_cond_timedwait()
 * and sem_timedwait().
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    ts->tv_sec = tv.tv_sec + ms / 1000;
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    /* Each addend is below one second, so a single carry suffices. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec++;
    }
}
222 int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
224 int rc;
225 struct timespec ts;
227 #if defined(__APPLE__) || defined(__NetBSD__)
228 rc = 0;
229 compute_abs_deadline(&ts, ms);
230 pthread_mutex_lock(&sem->lock);
231 while (sem->count == 0) {
232 rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
233 if (rc == ETIMEDOUT) {
234 break;
236 if (rc != 0) {
237 error_exit(rc, __func__);
240 if (rc != ETIMEDOUT) {
241 --sem->count;
243 pthread_mutex_unlock(&sem->lock);
244 return (rc == ETIMEDOUT ? -1 : 0);
245 #else
246 if (ms <= 0) {
247 /* This is cheaper than sem_timedwait. */
248 do {
249 rc = sem_trywait(&sem->sem);
250 } while (rc == -1 && errno == EINTR);
251 if (rc == -1 && errno == EAGAIN) {
252 return -1;
254 } else {
255 compute_abs_deadline(&ts, ms);
256 do {
257 rc = sem_timedwait(&sem->sem, &ts);
258 } while (rc == -1 && errno == EINTR);
259 if (rc == -1 && errno == ETIMEDOUT) {
260 return -1;
263 if (rc < 0) {
264 error_exit(errno, __func__);
266 return 0;
267 #endif
270 void qemu_sem_wait(QemuSemaphore *sem)
272 int rc;
274 #if defined(__APPLE__) || defined(__NetBSD__)
275 pthread_mutex_lock(&sem->lock);
276 while (sem->count == 0) {
277 rc = pthread_cond_wait(&sem->cond, &sem->lock);
278 if (rc != 0) {
279 error_exit(rc, __func__);
282 --sem->count;
283 pthread_mutex_unlock(&sem->lock);
284 #else
285 do {
286 rc = sem_wait(&sem->sem);
287 } while (rc == -1 && errno == EINTR);
288 if (rc < 0) {
289 error_exit(errno, __func__);
291 #endif
294 #ifdef __linux__
295 #define futex(...) syscall(__NR_futex, __VA_ARGS__)
297 static inline void futex_wake(QemuEvent *ev, int n)
299 futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
302 static inline void futex_wait(QemuEvent *ev, unsigned val)
304 futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
306 #else
307 static inline void futex_wake(QemuEvent *ev, int n)
309 if (n == 1) {
310 pthread_cond_signal(&ev->cond);
311 } else {
312 pthread_cond_broadcast(&ev->cond);
316 static inline void futex_wait(QemuEvent *ev, unsigned val)
318 pthread_mutex_lock(&ev->lock);
319 if (ev->value == val) {
320 pthread_cond_wait(&ev->cond, &ev->lock);
322 pthread_mutex_unlock(&ev->lock);
324 #endif
/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1
344 void qemu_event_init(QemuEvent *ev, bool init)
346 #ifndef __linux__
347 pthread_mutex_init(&ev->lock, NULL);
348 pthread_cond_init(&ev->cond, NULL);
349 #endif
351 ev->value = (init ? EV_SET : EV_FREE);
354 void qemu_event_destroy(QemuEvent *ev)
356 #ifndef __linux__
357 pthread_mutex_destroy(&ev->lock);
358 pthread_cond_destroy(&ev->cond);
359 #endif
362 void qemu_event_set(QemuEvent *ev)
364 if (atomic_mb_read(&ev->value) != EV_SET) {
365 if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
366 /* There were waiters, wake them up. */
367 futex_wake(ev, INT_MAX);
372 void qemu_event_reset(QemuEvent *ev)
374 if (atomic_mb_read(&ev->value) == EV_SET) {
376 * If there was a concurrent reset (or even reset+wait),
377 * do nothing. Otherwise change EV_SET->EV_FREE.
379 atomic_or(&ev->value, EV_FREE);
383 void qemu_event_wait(QemuEvent *ev)
385 unsigned value;
387 value = atomic_mb_read(&ev->value);
388 if (value != EV_SET) {
389 if (value == EV_FREE) {
391 * Leave the event reset and tell qemu_event_set that there
392 * are waiters. No need to retry, because there cannot be
393 * a concurent busy->free transition. After the CAS, the
394 * event will be either set or busy.
396 if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
397 return;
400 futex_wait(ev, EV_BUSY);
404 /* Attempt to set the threads name; note that this is for debug, so
405 * we're not going to fail if we can't set it.
407 static void qemu_thread_set_name(QemuThread *thread, const char *name)
409 #ifdef CONFIG_PTHREAD_SETNAME_NP
410 pthread_setname_np(thread->thread, name);
411 #endif
414 void qemu_thread_create(QemuThread *thread, const char *name,
415 void *(*start_routine)(void*),
416 void *arg, int mode)
418 sigset_t set, oldset;
419 int err;
420 pthread_attr_t attr;
422 err = pthread_attr_init(&attr);
423 if (err) {
424 error_exit(err, __func__);
426 if (mode == QEMU_THREAD_DETACHED) {
427 err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
428 if (err) {
429 error_exit(err, __func__);
433 /* Leave signal handling to the iothread. */
434 sigfillset(&set);
435 pthread_sigmask(SIG_SETMASK, &set, &oldset);
436 err = pthread_create(&thread->thread, &attr, start_routine, arg);
437 if (err)
438 error_exit(err, __func__);
440 if (name_threads) {
441 qemu_thread_set_name(thread, name);
444 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
446 pthread_attr_destroy(&attr);
449 void qemu_thread_get_self(QemuThread *thread)
451 thread->thread = pthread_self();
454 bool qemu_thread_is_self(QemuThread *thread)
456 return pthread_equal(pthread_self(), thread->thread);
/* Terminate the calling thread, making @retval available to joiners. */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
464 void *qemu_thread_join(QemuThread *thread)
466 int err;
467 void *ret;
469 err = pthread_join(thread->thread, &ret);
470 if (err) {
471 error_exit(err, __func__);
473 return ret;