configure: Don't use __int128_t for clang versions before 3.2
[qemu/ar7.git] / util / qemu-thread-posix.c
blob960d7f5d4280b50f95cedfef789474cc98fd2052
1 /*
2 * Wrappers around mutex/cond/thread functions
4 * Copyright Red Hat, Inc. 2009
6 * Author:
7 * Marcelo Tosatti <mtosatti@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
13 #include <stdlib.h>
14 #include <stdio.h>
15 #include <errno.h>
16 #include <time.h>
17 #include <signal.h>
18 #include <stdint.h>
19 #include <string.h>
20 #include <limits.h>
21 #include <unistd.h>
22 #include <sys/time.h>
23 #ifdef __linux__
24 #include <sys/syscall.h>
25 #include <linux/futex.h>
26 #endif
27 #include "qemu/thread.h"
28 #include "qemu/atomic.h"
/* When true, qemu_thread_create() applies the caller-supplied name to new
 * threads (where pthread_setname_np is available).  Off by default. */
static bool name_threads;

/* Enable or disable naming of newly created threads. */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;
}
/* Report a fatal pthread/semaphore error and abort the process.
 * @err is a positive error number (as returned by pthread_* functions,
 * or taken from errno); @msg is typically __func__ of the caller.
 * Never returns. */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
43 void qemu_mutex_init(QemuMutex *mutex)
45 int err;
46 pthread_mutexattr_t mutexattr;
48 pthread_mutexattr_init(&mutexattr);
49 pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
50 err = pthread_mutex_init(&mutex->lock, &mutexattr);
51 pthread_mutexattr_destroy(&mutexattr);
52 if (err)
53 error_exit(err, __func__);
56 void qemu_mutex_destroy(QemuMutex *mutex)
58 int err;
60 err = pthread_mutex_destroy(&mutex->lock);
61 if (err)
62 error_exit(err, __func__);
65 void qemu_mutex_lock(QemuMutex *mutex)
67 int err;
69 err = pthread_mutex_lock(&mutex->lock);
70 if (err)
71 error_exit(err, __func__);
/* Try to acquire the mutex without blocking.
 * Returns 0 on success or a pthread error number (e.g. EBUSY when the
 * mutex is already held).  Unlike the other wrappers, errors are
 * returned to the caller rather than aborting. */
int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}
79 void qemu_mutex_unlock(QemuMutex *mutex)
81 int err;
83 err = pthread_mutex_unlock(&mutex->lock);
84 if (err)
85 error_exit(err, __func__);
88 void qemu_cond_init(QemuCond *cond)
90 int err;
92 err = pthread_cond_init(&cond->cond, NULL);
93 if (err)
94 error_exit(err, __func__);
97 void qemu_cond_destroy(QemuCond *cond)
99 int err;
101 err = pthread_cond_destroy(&cond->cond);
102 if (err)
103 error_exit(err, __func__);
106 void qemu_cond_signal(QemuCond *cond)
108 int err;
110 err = pthread_cond_signal(&cond->cond);
111 if (err)
112 error_exit(err, __func__);
115 void qemu_cond_broadcast(QemuCond *cond)
117 int err;
119 err = pthread_cond_broadcast(&cond->cond);
120 if (err)
121 error_exit(err, __func__);
124 void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
126 int err;
128 err = pthread_cond_wait(&cond->cond, &mutex->lock);
129 if (err)
130 error_exit(err, __func__);
/* Initialize a semaphore with initial count @init (must be >= 0).
 * On OS X and NetBSD, POSIX unnamed semaphores are not usable, so the
 * semaphore is emulated with a mutex/condvar/counter triple; elsewhere
 * a native sem_t is used.  Aborts on any error. */
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    /* Mirror sem_init's EINVAL behavior for a negative initial count. */
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    /* pshared=0: the semaphore is private to this process. */
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
158 void qemu_sem_destroy(QemuSemaphore *sem)
160 int rc;
162 #if defined(__APPLE__) || defined(__NetBSD__)
163 rc = pthread_cond_destroy(&sem->cond);
164 if (rc < 0) {
165 error_exit(rc, __func__);
167 rc = pthread_mutex_destroy(&sem->lock);
168 if (rc < 0) {
169 error_exit(rc, __func__);
171 #else
172 rc = sem_destroy(&sem->sem);
173 if (rc < 0) {
174 error_exit(errno, __func__);
176 #endif
/* Increment the semaphore and wake one waiter.  Aborts on error,
 * including an EINVAL if the emulated counter would overflow UINT_MAX
 * (mirroring sem_post's overflow error). */
void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        /* Counter saturated: report the same error sem_post would. */
        rc = EINVAL;
    } else {
        sem->count++;
        /* Signal while holding the lock; one waiter re-checks count. */
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
/* Fill *ts with the absolute wall-clock time "now + ms milliseconds",
 * suitable for pthread_cond_timedwait/sem_timedwait. */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    /* Normalize: the nanosecond field can exceed one second by at most
     * one full second, so a single carry is enough. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec++;
    }
}
/* Decrement the semaphore, waiting up to @ms milliseconds.
 * Returns 0 if the semaphore was acquired, -1 on timeout.  Any other
 * error aborts.  ms <= 0 performs a non-blocking poll. */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* Only consume a count if we did not time out. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);

        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);

        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    /* Any remaining failure is unexpected and fatal. */
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
/* Decrement the semaphore, blocking until the count is positive.
 * EINTR is retried transparently; any other error aborts. */
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    /* sem_wait can be interrupted by a signal; retry on EINTR. */
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);

    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
#ifdef __linux__
/* Raw futex(2) syscall; glibc provides no wrapper for it. */
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

/* Wake up to @n threads blocked in futex_wait on @ev. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

/* Block until ev->value changes, but only if it currently equals @val
 * (the kernel re-checks atomically, avoiding lost wakeups). */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
/* Non-Linux fallback: emulate the futex with the mutex/condvar pair
 * embedded in QemuEvent. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    /* Like FUTEX_WAIT: sleep only if the value still matches. */
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 *
 * The values are chosen so the OR trick works: EV_SET is 0, so
 * OR-ing EV_FREE into a set event yields EV_FREE, while OR-ing it
 * into EV_FREE (1) or EV_BUSY (-1, all bits set) changes nothing.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1
337 void qemu_event_init(QemuEvent *ev, bool init)
339 #ifndef __linux__
340 pthread_mutex_init(&ev->lock, NULL);
341 pthread_cond_init(&ev->cond, NULL);
342 #endif
344 ev->value = (init ? EV_SET : EV_FREE);
/* Destroy an event.  On Linux the event is a bare integer and needs no
 * cleanup; elsewhere the emulation's pthread objects are torn down. */
void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
/* Signal the event: transition free/busy -> set and wake all waiters.
 * The initial read is a fast path; the xchg both publishes EV_SET and
 * tells us (via the old value) whether anyone was waiting. */
void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}
/* Clear the event: transition set -> free.  The OR (rather than a
 * plain store) makes this a no-op if another thread concurrently moved
 * the event to free or busy, preserving the invariant that busy->free
 * never happens (see the transition table above EV_SET). */
void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
/* Block until the event is set.  A waiter first advertises itself by
 * moving the event from free to busy, so qemu_event_set knows a wake
 * is needed; futex_wait then re-checks the value atomically. */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}
/* Create a new thread running start_routine(arg).
 * @name is applied only when thread naming is enabled and glibc >= 2.12
 * provides pthread_setname_np.  @mode is QEMU_THREAD_DETACHED for a
 * detached thread, otherwise the thread is joinable.  Aborts on any
 * pthread error. */
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread: block all signals in the
     * parent so the child inherits a full mask, then restore. */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

#if defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 12))
    /* NOTE(review): the name is set from the parent after pthread_create,
     * so the child may briefly run unnamed (or, if detached, could in
     * principle exit first) — benign for naming, but worth confirming. */
    if (name_threads) {
        pthread_setname_np(thread->thread, name);
    }
#endif

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
/* Store the calling thread's handle into @thread. */
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}
/* Return true if the calling thread is @thread.  pthread_equal is used
 * because pthread_t must not be compared with ==. */
bool qemu_thread_is_self(QemuThread *thread)
{
   return pthread_equal(pthread_self(), thread->thread);
}
/* Terminate the calling thread; @retval is delivered to a joiner via
 * qemu_thread_join. */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
449 void *qemu_thread_join(QemuThread *thread)
451 int err;
452 void *ret;
454 err = pthread_join(thread->thread, &ret);
455 if (err) {
456 error_exit(err, __func__);
458 return ret;