/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/processor.h"
#include "qemu/queue.h"
#include "block/aio.h"
#include "trace.h"

void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (lock) {
        qemu_lockable_unlock(lock);
    }

    /* There is no race condition here.  Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue.  This avoids the thundering herd effect.
     * This could be implemented for CoMutexes, but not really for
     * other cases of QemuLockable.
     */
    if (lock) {
        qemu_lockable_lock(lock);
    }
}
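
/* A minimal usage sketch (hypothetical caller, not part of this file).
 * Because qemu_co_queue_wait() releases the lock around the yield and
 * re-acquires it afterwards, callers re-check their condition in a loop:
 *
 *     qemu_co_mutex_lock(&mu);
 *     while (!data_ready) {                  // hypothetical condition
 *         qemu_co_queue_wait(&waiters, &mu);
 *     }
 *     qemu_co_mutex_unlock(&mu);
 */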

static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
{
    Coroutine *next;

    if (QSIMPLEQ_EMPTY(&queue->entries)) {
        return false;
    }

    while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
        aio_co_wake(next);
        if (single) {
            break;
        }
    }
    return true;
}

bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    return qemu_co_queue_do_restart(queue, true);
}

void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    qemu_co_queue_do_restart(queue, false);
}

bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    if (lock) {
        qemu_lockable_unlock(lock);
    }
    aio_co_wake(next);
    if (lock) {
        qemu_lockable_lock(lock);
    }
    return true;
}

bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}

/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;

static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

/* True if a wait record is queued on either list.  */
static bool has_waiters(CoMutex *mutex)
{
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}
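
/* Illustration of the two-list scheme above (comment only; hypothetical
 * waiters A, B, C; lists shown head first).  Producers prepend atomically,
 * and the single consumer reverses each batch so pops come out in FIFO
 * order:
 *
 *     push_waiter(A); push_waiter(B); push_waiter(C);
 *         from_push: C -> B -> A        to_pop: (empty)
 *     pop_waiter() finds to_pop empty and calls move_waiters():
 *         from_push: (empty)            to_pop: A -> B -> C
 *     pop_waiter() then returns A; later calls return B, then C.
 */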

void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = atomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves! */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (atomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (atomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = atomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended.  */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}
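
/* A minimal usage sketch (hypothetical caller, for illustration).  Unlike a
 * QemuMutex, a CoMutex suspends only the calling coroutine, never the
 * underlying thread, so it is safe to hold across operations that yield:
 *
 *     CoMutex mu;
 *     qemu_co_mutex_init(&mu);
 *     ...
 *     qemu_co_mutex_lock(&mu);
 *     do_io_that_may_yield();                // hypothetical coroutine_fn
 *     qemu_co_mutex_unlock(&mu);
 */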

void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (atomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Pfew, that was easy!  */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        atomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
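
/* Worked example of the hand-off above (illustration only).  Suppose
 * unlock() saw mutex->locked > 1, but the concurrent lock() has not pushed
 * its wait record yet:
 *
 *     unlock: pop_waiter() == NULL
 *     unlock: handoff = 7 (some nonzero sequence), !has_waiters() -> break;
 *             the late lock() is now responsible for the wakeup
 *     lock:   push_waiter(); reads handoff == 7; cmpxchg(7, 0) succeeds;
 *             pops its own record and takes the lock without yielding
 *
 * If the waiter instead appears before unlock()'s own cmpxchg(7, 0),
 * unlock() takes the token back and loops to wake the waiter itself.
 */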

void qemu_co_rwlock_init(CoRwlock *lock)
{
    memset(lock, 0, sizeof(*lock));
    qemu_co_queue_init(&lock->queue);
    qemu_co_mutex_init(&lock->mutex);
}

void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line.  */
    while (lock->pending_writer) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex.  */
    self->locks_held++;
}

void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    if (!lock->reader) {
        /* The critical section started in qemu_co_rwlock_wrlock.  */
        qemu_co_queue_restart_all(&lock->queue);
    } else {
        self->locks_held--;

        qemu_co_mutex_lock(&lock->mutex);
        lock->reader--;
        assert(lock->reader >= 0);
        /* Wakeup only one waiting writer */
        if (!lock->reader) {
            qemu_co_queue_next(&lock->queue);
        }
    }
    qemu_co_mutex_unlock(&lock->mutex);
}

void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    /* lock->mutex critical section started in qemu_co_rwlock_wrlock or
     * qemu_co_rwlock_upgrade.
     */
    assert(lock->reader == 0);
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex.  */
    self->locks_held++;
}

void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, so that lock->reader remains zero.
     * There is no need to update self->locks_held.
     */
}

void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->reader > 0);
    lock->reader--;
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, similar to qemu_co_rwlock_wrlock.  Do
     * not account for the lock twice in self->locks_held.
     */
    self->locks_held--;
}
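
/* A minimal usage sketch (hypothetical caller, for illustration).  Readers
 * run their critical section without lock->mutex held; a writer keeps it
 * held until its qemu_co_rwlock_unlock(), so lock->reader stays zero:
 *
 *     CoRwlock rw;
 *     qemu_co_rwlock_init(&rw);
 *     ...
 *     qemu_co_rwlock_rdlock(&rw);
 *     read_shared_state();                   // hypothetical helper
 *     qemu_co_rwlock_unlock(&rw);
 *
 *     qemu_co_rwlock_wrlock(&rw);
 *     modify_shared_state();                 // hypothetical helper
 *     qemu_co_rwlock_unlock(&rw);
 */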