/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err)
        error_exit(err, __func__);
}
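
/*
 * Illustrative usage sketch, not part of the original file: the classic
 * "wait for a condition under a mutex" pattern built from the wrappers
 * above.  ExampleState and the example_* helpers are hypothetical names.
 */
typedef struct ExampleState {
    QemuMutex lock;
    QemuCond cond;
    bool ready;
} ExampleState;

static void example_wait_until_ready(ExampleState *s)
{
    qemu_mutex_lock(&s->lock);
    while (!s->ready) {
        /* Atomically drops the lock while sleeping, retakes it on wakeup. */
        qemu_cond_wait(&s->cond, &s->lock);
    }
    qemu_mutex_unlock(&s->lock);
}

static void example_mark_ready(ExampleState *s)
{
    qemu_mutex_lock(&s->lock);
    s->ready = true;
    qemu_cond_signal(&s->cond);
    qemu_mutex_unlock(&s->lock);
}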

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
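
/*
 * Illustrative usage sketch, not part of the original file: a bounded wait
 * on a QemuSemaphore that falls back to an unbounded wait.  example_consume
 * is a hypothetical name.
 */
static void example_consume(QemuSemaphore *sem)
{
    /* Wait up to 100 ms; qemu_sem_timedwait() returns -1 on timeout. */
    if (qemu_sem_timedwait(sem, 100) < 0) {
        /* Timed out: block until another thread calls qemu_sem_post(). */
        qemu_sem_wait(sem);
    }
}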

#ifdef __linux__
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
        switch (errno) {
        case EWOULDBLOCK:
            return;
        case EINTR:
            break; /* get out of switch and retry */
        default:
            abort();
        }
    }
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}
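
/*
 * Illustrative usage sketch, not part of the original file: the intended
 * set/wait/reset pattern that the transition diagram above describes.
 * example_wait_for_done is a hypothetical name.
 */
static void example_wait_for_done(QemuEvent *ev)
{
    /* Blocks until another thread calls qemu_event_set(ev). */
    qemu_event_wait(ev);
    /* Re-arm the event so it can be waited on again (EV_SET -> EV_FREE). */
    qemu_event_reset(ev);
}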

static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}
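
/*
 * Illustrative usage sketch, not part of the original file: registering a
 * per-thread exit notifier.  The example_* names are hypothetical; the
 * notify callback signature is the one declared in qemu/notify.h.
 */
static void example_thread_exiting(Notifier *notifier, void *data)
{
    /* Called from qemu_thread_atexit_run() when the thread terminates. */
}

static void example_register_exit_hook(void)
{
    static __thread Notifier example_exit_notifier;

    example_exit_notifier.notify = example_thread_exiting;
    qemu_thread_atexit_add(&example_exit_notifier);
}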

/* Attempt to set the threads name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    pthread_setname_np(thread->thread, name);
#endif
}

void qemu_thread_create(QemuThread *thread, const char *name,
                       void *(*start_routine)(void*),
                       void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

    if (name_threads) {
        qemu_thread_set_name(thread, name);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}
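
/*
 * Illustrative usage sketch, not part of the original file: creating a
 * joinable worker thread and collecting its return value.  The example_*
 * names are hypothetical; QEMU_THREAD_JOINABLE comes from qemu/thread.h.
 */
static void *example_worker(void *arg)
{
    /* arg is handed through unchanged from qemu_thread_create(). */
    return arg;
}

static void *example_run_worker(void *arg)
{
    QemuThread thread;

    qemu_thread_create(&thread, "example-worker", example_worker, arg,
                       QEMU_THREAD_JOINABLE);
    return qemu_thread_join(&thread);
}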