[qemu/ar7.git] include/qemu/thread.h
#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1
void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);
typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
            qemu_mutex_lock_impl(m, __FILE__, __LINE__);
#define qemu_mutex_trylock(m)                                           \
            qemu_mutex_trylock_impl(m, __FILE__, __LINE__);
#define qemu_rec_mutex_lock(m)                                          \
            qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__);
#define qemu_rec_mutex_trylock(m)                                       \
            qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__);
#define qemu_cond_wait(c, m)                                            \
            qemu_cond_wait_impl(c, m, __FILE__, __LINE__);
#else
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);  \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                        \
            QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_lock(m) ({                                       \
            QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_trylock(m) ({                                    \
            QemuRecMutexTrylockFunc _f;                                 \
            _f = atomic_read(&qemu_rec_mutex_trylock_func);             \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);    \
            _f(c, m, __FILE__, __LINE__);                               \
        })
#endif
#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}
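
/*
 * Usage sketch (illustrative only, not part of this header): guarding a
 * shared counter with a QemuMutex.  The parenthesized definitions above
 * mean that both the macro form qemu_mutex_lock(&lock) and a plain function
 * pointer to qemu_mutex_lock keep working.  qemu_mutex_trylock() is assumed
 * here to return 0 when the lock was acquired.
 *
 *     static QemuMutex lock;
 *     static int counter;
 *
 *     void counter_setup(void)
 *     {
 *         qemu_mutex_init(&lock);
 *     }
 *
 *     void counter_add(int n)
 *     {
 *         qemu_mutex_lock(&lock);
 *         counter += n;
 *         qemu_mutex_unlock(&lock);
 *     }
 *
 *     bool counter_try_add(int n)
 *     {
 *         if (qemu_mutex_trylock(&lock)) {
 *             return false;           // lock busy, caller retries later
 *         }
 *         counter += n;
 *         qemu_mutex_unlock(&lock);
 *         return true;
 *     }
 */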
/* Prototypes for other functions are in thread-posix.h/thread-win32.h.  */
void qemu_rec_mutex_init(QemuRecMutex *mutex);

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
 * and pthread_cond_broadcast can be called except while the same mutex is
 * held as in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}
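
/*
 * Usage sketch (illustrative only): a single-slot producer/consumer hand-off.
 * Per the warning above, qemu_cond_signal() is called while the same mutex
 * that protects the predicate is held, and the consumer re-checks the
 * predicate in a loop to cope with spurious wakeups.
 *
 *     static QemuMutex q_lock;
 *     static QemuCond q_cond;
 *     static void *q_item;
 *
 *     void queue_put(void *item)
 *     {
 *         qemu_mutex_lock(&q_lock);
 *         q_item = item;
 *         qemu_cond_signal(&q_cond);
 *         qemu_mutex_unlock(&q_lock);
 *     }
 *
 *     void *queue_get(void)
 *     {
 *         void *item;
 *
 *         qemu_mutex_lock(&q_lock);
 *         while (!q_item) {
 *             qemu_cond_wait(&q_cond, &q_lock);
 *         }
 *         item = q_item;
 *         q_item = NULL;
 *         qemu_mutex_unlock(&q_lock);
 *         return item;
 *     }
 */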
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);
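
/*
 * Usage sketch (illustrative only): waiting for a worker to signal
 * completion, with a timeout.  This assumes qemu_sem_timedwait() returns 0
 * when the semaphore was posted within @ms milliseconds and non-zero on
 * timeout.
 *
 *     static QemuSemaphore done_sem;
 *
 *     void worker_setup(void)
 *     {
 *         qemu_sem_init(&done_sem, 0);
 *     }
 *
 *     void worker_finished(void)          // called by the worker thread
 *     {
 *         qemu_sem_post(&done_sem);
 *     }
 *
 *     bool wait_for_worker(void)          // waits at most one second
 *     {
 *         return qemu_sem_timedwait(&done_sem, 1000) == 0;
 *     }
 */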
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);
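
/*
 * Usage sketch (illustrative only): a QemuEvent as a one-shot "ready" flag.
 * qemu_event_wait() returns immediately once the event has been set;
 * qemu_event_reset() re-arms it for the next round.
 *
 *     static QemuEvent ready_ev;          // created with qemu_event_init(&ready_ev, false)
 *
 *     void init_done(void)                // signalling side
 *     {
 *         qemu_event_set(&ready_ev);
 *     }
 *
 *     void wait_until_ready(void)         // waiting side
 *     {
 *         qemu_event_wait(&ready_ev);
 *     }
 */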
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);
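
/*
 * Usage sketch (illustrative only): spawning a joinable worker thread and
 * collecting its return value.  The thread name ("example-worker") is just
 * a debugging label.
 *
 *     static void *worker_fn(void *arg)
 *     {
 *         int *input = arg;
 *         ...
 *         return input;               // handed back through qemu_thread_join()
 *     }
 *
 *     void run_worker(int *input)
 *     {
 *         QemuThread t;
 *         void *ret;
 *
 *         qemu_thread_create(&t, "example-worker", worker_fn, input,
 *                            QEMU_THREAD_JOINABLE);
 *         ret = qemu_thread_join(&t);
 *         ...
 *     }
 */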
struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list.  It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);
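
/*
 * Usage sketch (illustrative only): freeing per-thread data when the thread
 * exits.  This assumes the Notifier layout from "qemu/notify.h", i.e. a
 * struct whose notify member is a void (*)(Notifier *, void *) callback.
 *
 *     static __thread void *my_tls_buffer;
 *     static __thread Notifier my_tls_cleanup;
 *
 *     static void my_tls_free(Notifier *n, void *unused)
 *     {
 *         g_free(my_tls_buffer);
 *     }
 *
 *     void my_tls_init(void)
 *     {
 *         my_tls_buffer = g_malloc(4096);
 *         my_tls_cleanup.notify = my_tls_free;
 *         qemu_thread_atexit_add(&my_tls_cleanup);
 *     }
 */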
struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (atomic_read(&spin->value)) {
            cpu_relax();
        }
    }
}

static inline bool qemu_spin_trylock(QemuSpin *spin)
{
    return __sync_lock_test_and_set(&spin->value, true);
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return atomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}
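
/*
 * Usage sketch (illustrative only): protecting a very short critical section
 * with a QemuSpin.  Spinlocks busy-wait, so they only make sense when the
 * lock is held for a handful of instructions and the holder never sleeps.
 * Note that, as implemented above, qemu_spin_trylock() returns the previous
 * lock value, i.e. false (0) when the lock was successfully acquired.
 *
 *     static QemuSpin stats_lock;         // set up with qemu_spin_init()
 *     static uint64_t stats_counter;
 *
 *     void stats_bump(void)
 *     {
 *         qemu_spin_lock(&stats_lock);
 *         stats_counter++;
 *         qemu_spin_unlock(&stats_lock);
 *     }
 */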
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockcnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);
/**
 * qemu_lockcnt_destroy: destroy a QemuLockcnt
 * @lockcnt: the lockcnt to destruct
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.  This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
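
/*
 * Usage sketch (illustrative only): a plausible reader-side pattern.  The
 * count keeps a shared structure alive while readers visit it; a writer
 * that wants to tear it down takes the lockcnt's lock and waits for the
 * count to drop to zero.  The struct Xyz name is hypothetical.
 *
 *     static QemuLockCnt xyz_lockcnt;
 *     static struct Xyz *xyz;
 *
 *     void xyz_visit(void)
 *     {
 *         qemu_lockcnt_inc(&xyz_lockcnt);
 *         if (xyz) {
 *             ... read-only access to *xyz ...
 *         }
 *         qemu_lockcnt_dec(&xyz_lockcnt);
 *     }
 */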
/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
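
/*
 * Usage sketch (illustrative only), continuing the reader-side example
 * above: letting the last reader free the shared structure.  The combined
 * decrement-and-lock makes "count reached zero" and "lock held" a single
 * atomic observation; the dead flag and xyz_free() are hypothetical.
 *
 *     void xyz_put_ref(void)
 *     {
 *         if (qemu_lockcnt_dec_and_lock(&xyz_lockcnt)) {
 *             // count dropped to zero and the lock is now held
 *             if (xyz && xyz->dead) {
 *                 xyz_free(xyz);
 *                 xyz = NULL;
 *             }
 *             qemu_lockcnt_unlock(&xyz_lockcnt);
 *         }
 *     }
 */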
/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a LockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

#endif