/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */

#include "qemu/osdep.h"
#include "qemu/coroutine_int.h"
#include "qemu/processor.h"
#include "qemu/queue.h"
#include "block/aio.h"
#include "trace.h"

void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock,
                                          CoQueueWaitFlags flags)
{
    Coroutine *self = qemu_coroutine_self();
    if (flags & CO_QUEUE_WAIT_FRONT) {
        QSIMPLEQ_INSERT_HEAD(&queue->entries, self, co_queue_next);
    } else {
        QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
    }

    if (lock) {
        qemu_lockable_unlock(lock);
    }

    /* There is no race condition here.  Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue.  This avoids the thundering herd effect.
     * This could be implemented for CoMutexes, but not really for
     * other cases of QemuLockable.
     */
    if (lock) {
        qemu_lockable_lock(lock);
    }
}

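
/*
 * Illustrative sketch (not part of this file): CoQueue is typically used
 * as the coroutine analogue of a condition variable.  ExampleFifo,
 * fifo_pop() and fifo_push() below are hypothetical names.
 */
#if 0
typedef struct ExampleFifo {
    CoMutex lock;
    CoQueue has_data;       /* waiters blocked until data arrives */
    int data;
    bool filled;
} ExampleFifo;

static int coroutine_fn fifo_pop(ExampleFifo *f)
{
    int ret;

    qemu_co_mutex_lock(&f->lock);
    while (!f->filled) {
        /* Atomically releases f->lock, yields, then re-acquires it.  */
        qemu_co_queue_wait(&f->has_data, &f->lock);
    }
    ret = f->data;
    f->filled = false;
    qemu_co_mutex_unlock(&f->lock);
    return ret;
}

static void coroutine_fn fifo_push(ExampleFifo *f, int value)
{
    qemu_co_mutex_lock(&f->lock);
    f->data = value;
    f->filled = true;
    qemu_co_queue_next(&f->has_data);       /* wake one waiter, if any */
    qemu_co_mutex_unlock(&f->lock);
}
#endif
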
bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    if (lock) {
        qemu_lockable_unlock(lock);
    }
    aio_co_wake(next);
    if (lock) {
        qemu_lockable_lock(lock);
    }
    return true;
}

bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
{
    /* No unlock/lock needed in coroutine context.  */
    return qemu_co_enter_next_impl(queue, NULL);
}

void qemu_co_enter_all_impl(CoQueue *queue, QemuLockable *lock)
{
    while (qemu_co_enter_next_impl(queue, lock)) {
        /* just loop */
    }
}

void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
{
    /* No unlock/lock needed in coroutine context.  */
    qemu_co_enter_all_impl(queue, NULL);
}

bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}

/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;

static void coroutine_fn push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

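
/* Waiters are pushed LIFO onto from_push; move_waiters() drains from_push
 * and reverses it into to_pop, so that pop_waiter() below hands out wait
 * records in FIFO order.
 */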
static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

static bool has_waiters(CoMutex *mutex)
{
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}

void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = qatomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves!  */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (qatomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (qatomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = qatomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}

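
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * CoMutex protects state that is shared between coroutines, which may be
 * running in different AioContexts.
 */
#if 0
static CoMutex example_lock;    /* qemu_co_mutex_init(&example_lock) once */
static int example_counter;

static void coroutine_fn example_increment(void)
{
    qemu_co_mutex_lock(&example_lock);      /* may yield if contended */
    example_counter++;
    qemu_co_mutex_unlock(&example_lock);    /* may wake up a waiter */
}
#endif
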
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (qatomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Pfew, that was easy!  */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        qatomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}

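
/* One possible interleaving of the hand-off protocol, summarizing the code
 * above (illustrative only):
 *
 *     qemu_co_mutex_unlock                qemu_co_mutex_lock (slow path)
 *     -----------------------------       -----------------------------
 *     fetch_dec: locked was > 1
 *     pop_waiter() returns NULL           push_waiter(&w)
 *     handoff = sequence (say, 7)
 *                                         reads handoff == 7, sees waiters
 *                                         cmpxchg(&handoff, 7, 0) succeeds:
 *                                         the wakeup is now lock()'s job
 *     cmpxchg(&handoff, 7, 0) fails
 *     (it sees 0) and breaks out          pop_waiter() returns a record and
 *                                         wakes it, or returns with the
 *                                         mutex if the record is its own
 *
 * Only one side can win the cmpxchg for a given sequence number, so each
 * hand-off produces exactly one wakeup.
 */
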
typedef struct CoRwTicket {
    bool read;
    Coroutine *co;
    QSIMPLEQ_ENTRY(CoRwTicket) next;
} CoRwTicket;

void qemu_co_rwlock_init(CoRwlock *lock)
{
    qemu_co_mutex_init(&lock->mutex);
    lock->owners = 0;
    QSIMPLEQ_INIT(&lock->tickets);
}

/* Releases the internal CoMutex.  */
static void coroutine_fn qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
{
    CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets);
    Coroutine *co = NULL;

    /*
     * Setting lock->owners here prevents rdlock and wrlock from
     * sneaking in between unlock and wake.
     */

    if (tkt) {
        if (tkt->read) {
            if (lock->owners >= 0) {
                lock->owners++;
                co = tkt->co;
            }
        } else {
            if (lock->owners == 0) {
                lock->owners = -1;
                co = tkt->co;
            }
        }
    }

    if (co) {
        QSIMPLEQ_REMOVE_HEAD(&lock->tickets, next);
        qemu_co_mutex_unlock(&lock->mutex);
        aio_co_wake(co);
    } else {
        qemu_co_mutex_unlock(&lock->mutex);
    }
}

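
/* Note that qemu_co_rwlock_maybe_wake_one() wakes at most one coroutine.
 * A run of readers at the head of the queue is woken one at a time: each
 * reader that resumes in qemu_co_rwlock_rdlock() takes the internal mutex
 * and calls this function again, waking the next in line.
 */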
void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 0 || (lock->owners > 0 && QSIMPLEQ_EMPTY(&lock->tickets))) {
        lock->owners++;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { true, self };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners >= 1);

        /* Possibly wake another reader, which will wake the next in line.  */
        qemu_co_mutex_lock(&lock->mutex);
        qemu_co_rwlock_maybe_wake_one(lock);
    }

    self->locks_held++;
}

void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    self->locks_held--;

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners > 0) {
        lock->owners--;
    } else {
        assert(lock->owners == -1);
        lock->owners = 0;
    }

    qemu_co_rwlock_maybe_wake_one(lock);
}

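
/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * basic reader/writer pattern.
 */
#if 0
static CoRwlock example_rwlock;     /* qemu_co_rwlock_init() at startup */

static int coroutine_fn example_read(const int *shared)
{
    int val;

    qemu_co_rwlock_rdlock(&example_rwlock);     /* shared access */
    val = *shared;
    qemu_co_rwlock_unlock(&example_rwlock);
    return val;
}

static void coroutine_fn example_write(int *shared, int val)
{
    qemu_co_rwlock_wrlock(&example_rwlock);     /* exclusive access */
    *shared = val;
    qemu_co_rwlock_unlock(&example_rwlock);
}
#endif
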
void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners == -1);
    lock->owners = 1;

    /* Possibly wake another reader, which will wake the next in line.  */
    qemu_co_rwlock_maybe_wake_one(lock);
}

void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners == 0) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }

    self->locks_held++;
}

void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners > 0);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 1 && QSIMPLEQ_EMPTY(&lock->tickets)) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        lock->owners--;
        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_rwlock_maybe_wake_one(lock);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }
}
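
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * read-mostly update using upgrade/downgrade.  qemu_co_rwlock_upgrade()
 * may yield and let other writers run, so the condition must be
 * re-checked after it returns.
 */
#if 0
static void coroutine_fn example_clamp(CoRwlock *rwlock, int *shared)
{
    qemu_co_rwlock_rdlock(rwlock);
    if (*shared < 0) {
        qemu_co_rwlock_upgrade(rwlock);     /* may yield; re-check below */
        if (*shared < 0) {
            *shared = 0;
        }
        qemu_co_rwlock_downgrade(rwlock);   /* back to shared access */
    }
    qemu_co_rwlock_unlock(rwlock);
}
#endif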