util/qemu-thread-posix.c
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;
    pthread_mutexattr_t mutexattr;

    pthread_mutexattr_init(&mutexattr);
    pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
    err = pthread_mutex_init(&mutex->lock, &mutexattr);
    pthread_mutexattr_destroy(&mutexattr);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    /* pthread calls return a positive error number, not -1/errno.  */
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

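/*
 * Usage sketch (illustrative only, not part of the original file): the
 * timed wait above returns 0 when a post was consumed and -1 on timeout,
 * so a caller can poll for work with a bounded wait, e.g.:
 *
 *     QemuSemaphore sem;
 *     qemu_sem_init(&sem, 0);
 *     ...
 *     qemu_sem_post(&sem);                     // producer: signal one item
 *     ...
 *     if (qemu_sem_timedwait(&sem, 100) == 0) { // consumer: wait up to 100 ms
 *         // a post arrived within the deadline
 *     } else {
 *         // timed out, nothing was posted
 *     }
 */
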
#ifdef __linux__
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}

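/*
 * Usage sketch (illustrative only, not part of the original file): a
 * QemuEvent acts like a manual-reset event, so one thread can block until
 * another announces that a condition holds, then re-arm it:
 *
 *     QemuEvent done;
 *     qemu_event_init(&done, false);    // start in the "free" state
 *     ...
 *     qemu_event_wait(&done);           // waiter: blocks until qemu_event_set()
 *     qemu_event_reset(&done);          // waiter: re-arm for the next round
 *     ...
 *     qemu_event_set(&done);            // signaller: wakes all current waiters
 */
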
void qemu_thread_create(QemuThread *thread,
                       void *(*start_routine)(void*),
                       void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread: block every signal so the new
     * thread inherits a fully blocked mask, then restore our own mask.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}
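
/*
 * Usage sketch (illustrative only, not part of the original file): a joinable
 * thread is created with the non-detached mode constant from qemu/thread.h
 * (QEMU_THREAD_JOINABLE) and reaped with qemu_thread_join(); the worker()
 * routine below is hypothetical:
 *
 *     static void *worker(void *arg)
 *     {
 *         return arg;
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, worker, NULL, QEMU_THREAD_JOINABLE);
 *     void *ret = qemu_thread_join(&t);    // blocks until worker() returns
 */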