/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/processor.h"
#include "qemu/queue.h"
#include "block/aio.h"
#include "trace.h"
void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}
void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (lock) {
        qemu_lockable_unlock(lock);
    }

    /* There is no race condition here.  Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue.  This avoids the thundering herd effect.
     * This could be implemented for CoMutexes, but not really for
     * other cases of QemuLockable.
     */
    if (lock) {
        qemu_lockable_lock(lock);
    }
}
static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
{
    Coroutine *next;

    if (QSIMPLEQ_EMPTY(&queue->entries)) {
        return false;
    }

    while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
        aio_co_wake(next);
        if (single) {
            break;
        }
    }
    return true;
}
bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    return qemu_co_queue_do_restart(queue, true);
}
void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    qemu_co_queue_do_restart(queue, false);
}
bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    if (lock) {
        qemu_lockable_unlock(lock);
    }
    aio_co_wake(next);
    if (lock) {
        qemu_lockable_lock(lock);
    }
    return true;
}
bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}
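
/* Usage sketch (illustrative only, not compiled into this file): together
 * with a CoMutex, a CoQueue behaves like a condition variable, in the same
 * way the CoRwlock code below uses it.  The identifiers "state_lock",
 * "wait_queue", "state_ready" and both functions are hypothetical names:
 *
 *     static CoMutex state_lock;
 *     static CoQueue wait_queue;
 *     static bool state_ready;
 *
 *     static void coroutine_fn wait_for_state(void)
 *     {
 *         qemu_co_mutex_lock(&state_lock);
 *         while (!state_ready) {
 *             // drops state_lock across the yield, retakes it on wakeup
 *             qemu_co_queue_wait(&wait_queue, &state_lock);
 *         }
 *         qemu_co_mutex_unlock(&state_lock);
 *     }
 *
 *     static void coroutine_fn signal_state(void)
 *     {
 *         qemu_co_mutex_lock(&state_lock);
 *         state_ready = true;
 *         qemu_co_queue_next(&wait_queue);    // wake at most one waiter
 *         qemu_co_mutex_unlock(&state_lock);
 *     }
 */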
/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;
static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}
static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}
static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}
static bool has_waiters(CoMutex *mutex)
{
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}
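
/* Life cycle of a wait record (illustrative sketch, not compiled): a
 * contended qemu_co_mutex_lock() keeps its CoWaitRecord on the coroutine
 * stack, which is safe because the locker cannot return before it is woken:
 *
 *     CoWaitRecord w;
 *     push_waiter(mutex, &w);                      // locker: many producers
 *     ...
 *     CoWaitRecord *to_wake = pop_waiter(mutex);   // unlocker: one consumer
 *     if (to_wake) {
 *         qemu_co_mutex_wake(mutex, to_wake->co);
 *     }
 */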
void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}
static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}
static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    w.co = self;
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = atomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves!  */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}
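
/* The hand-off race in a nutshell (illustrative comment, not compiled).
 * unlock() sees locked > 1 but finds no wait record yet, so it publishes a
 * token; lock() pushes its record and then tries to steal that token:
 *
 *     unlocker:  atomic_mb_set(&mutex->handoff, seq);    // "wake somebody"
 *     locker:    push_waiter(mutex, &w);
 *     locker:    if (atomic_cmpxchg(&mutex->handoff, seq, 0) == seq) {
 *                    // stolen: this locker pops a waiter itself, and if
 *                    // that waiter is its own record it owns the lock
 *                }
 *
 * Exactly one side wins the cmpxchg, so the wakeup is neither lost nor
 * performed twice.
 */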
void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (atomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (atomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = atomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended.  */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}
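
/* Note on the fast path above (an interpretation, kept out of the code):
 * mutex->locked acts as a counter, 0 = unlocked, 1 = locked with no
 * contention, n > 1 = locked with n - 1 lockers waiting or spinning.
 * atomic_cmpxchg(&mutex->locked, 0, 1) therefore both takes the lock and
 * reports contention, and atomic_fetch_inc() announces this locker to the
 * eventual qemu_co_mutex_unlock(), whose atomic_fetch_dec() == 1 test
 * detects the uncontended case.
 */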
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (atomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Pfew, that was easy!  */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        atomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
void qemu_co_rwlock_init(CoRwlock *lock)
{
    memset(lock, 0, sizeof(*lock));
    qemu_co_queue_init(&lock->queue);
    qemu_co_mutex_init(&lock->mutex);
}
void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line.  */
    while (lock->pending_writer) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex.  */
    self->locks_held++;
}
void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    if (!lock->reader) {
        /* The critical section started in qemu_co_rwlock_wrlock.  */
        qemu_co_queue_restart_all(&lock->queue);
    } else {
        self->locks_held--;

        qemu_co_mutex_lock(&lock->mutex);
        lock->reader--;
        assert(lock->reader >= 0);
        /* Wakeup only one waiting writer */
        if (!lock->reader) {
            qemu_co_queue_next(&lock->queue);
        }
    }
    qemu_co_mutex_unlock(&lock->mutex);
}
void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    /* lock->mutex critical section started in qemu_co_rwlock_wrlock or
     * qemu_co_rwlock_upgrade.
     */
    assert(lock->reader == 0);
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex.  */
    self->locks_held++;
}
void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, so that lock->reader remains zero.
     * There is no need to update self->locks_held.
     */
}
void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->reader > 0);
    lock->reader--;
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, similar to qemu_co_rwlock_wrlock.  Do
     * not account for the lock twice in self->locks_held.
     */
    self->locks_held--;
}
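
/* CoRwlock usage sketch (illustrative, not compiled; "table_lock" and both
 * functions are hypothetical names, not part of the QEMU API):
 *
 *     static CoRwlock table_lock;
 *
 *     static void coroutine_fn read_table(void)
 *     {
 *         qemu_co_rwlock_rdlock(&table_lock);
 *         ...                                 // shared section, no mutex held
 *         qemu_co_rwlock_unlock(&table_lock);
 *     }
 *
 *     static void coroutine_fn rewrite_table(void)
 *     {
 *         qemu_co_rwlock_wrlock(&table_lock);
 *         ...                                 // exclusive, lock->mutex held
 *         qemu_co_rwlock_downgrade(&table_lock);
 *         ...                                 // now shared again
 *         qemu_co_rwlock_unlock(&table_lock);
 *     }
 */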