/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when new BH is set.
     * This is needed to avoid guest timeouts caused
     * by the long cycles of the execution.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_and is paired with aio_bh_enqueue(). The implicit memory
     * barrier ensures that the callback sees all writes done by the scheduling
     * thread. It also ensures that the scheduling thread sees the cleared
     * flag before bh->cb has run, and thus will call aio_notify again if
     * necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}
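
/*
 * Usage sketch: a one-shot bottom half is the usual way to run a callback
 * exactly once in an AioContext's thread.  Callers normally go through the
 * aio_bh_schedule_oneshot() wrapper rather than the _full() variant; the
 * names my_oneshot_cb and my_state below are hypothetical, for illustration
 * only:
 *
 *     static void my_oneshot_cb(void *opaque)
 *     {
 *         MyState *my_state = opaque;
 *         // runs once in ctx's thread
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_oneshot_cb, my_state);
 *
 * Because the BH carries BH_ONESHOT, aio_bh_poll() frees it right after the
 * callback returns, so the caller never has to delete it.
 */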

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple occurrences of aio_bh_poll cannot be called concurrently. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous: the callback may still run if aio_bh_poll()
 * has already dequeued the bottom half.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous: the bottom half is not freed immediately,
 * but by the next aio_bh_poll() pass (or by aio_ctx_finalize()).
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}
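
/*
 * Usage sketch of the long-lived bottom half API above, with hypothetical
 * names (dev, my_bh_cb): the owner creates the BH once, schedules it any
 * number of times from any thread, and deletes it at teardown:
 *
 *     dev->bh = aio_bh_new(ctx, my_bh_cb, dev);     // once, at setup
 *     ...
 *     qemu_bh_schedule(dev->bh);                    // from any thread
 *     ...
 *     qemu_bh_delete(dev->bh);                      // at teardown; freed by
 *                                                   // the next aio_bh_poll()
 *
 * qemu_bh_delete() must be called on every BH before its AioContext is
 * finalized, otherwise aio_ctx_finalize() aborts (see below).
 */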

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};
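
/*
 * How glib drives the callbacks above (a simplified sketch of the GSource
 * contract, not QEMU-specific code): on each main loop iteration glib calls
 * prepare() to obtain a poll timeout, polls the source's file descriptors,
 * calls check() to see whether the source became ready, and only then calls
 * dispatch():
 *
 *     aio_ctx_prepare()   -> set notify_me, compute timeout
 *     poll(fds, timeout)
 *     aio_ctx_check()     -> clear notify_me; any BH, timer or fd ready?
 *     aio_ctx_dispatch()  -> aio_dispatch(): run BHs, fd handlers, timers
 *
 * aio_ctx_finalize() runs once, when the last reference to the GSource is
 * dropped.
 */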

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
     * aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}
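
/*
 * Summary sketch of the ordering protocol implemented by aio_notify() and
 * the event loop (aio_ctx_prepare()/aio_ctx_check() here, aio_poll() in the
 * aio-posix/aio-win32 backends), written out as the usual waker/sleeper
 * pattern:
 *
 *     waker (aio_notify)             sleeper (prepare / poll / check)
 *     ------------------             --------------------------------
 *     write bh->flags                notify_me |= 1
 *     smp_wmb()                      smp_mb()
 *     notified = true                read bh->flags, compute timeout
 *     smp_mb()                       poll(..., timeout)
 *     read notify_me                 notify_me &= ~1
 *       -> kick event notifier       aio_notify_accept():
 *          if the sleeper may          notified = false
 *          be blocked in poll()        smp_mb(); re-read bh->flags
 *
 * Either the waker observes notify_me and sets the event notifier, or the
 * sleeper observes the new work before blocking; the barriers rule out the
 * case where both miss each other.
 */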

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
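
/*
 * Usage sketch for aio_context_new(): most callers obtain an AioContext from
 * an IOThread or via qemu_get_aio_context() instead of creating one directly,
 * but a dedicated event loop thread would look roughly like this (the
 * my_loop_thread() wrapper and the 'running' flag are hypothetical):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *
 *     static void *my_loop_thread(void *opaque)
 *     {
 *         AioContext *ctx = opaque;
 *
 *         qemu_set_current_aio_context(ctx);
 *         while (qatomic_read(&running)) {
 *             aio_poll(ctx, true);        // blocks until there is work
 *         }
 *         return NULL;
 *     }
 *
 * Alternatively the context can be driven by a glib main loop via
 * g_source_attach(aio_get_g_source(ctx), ...).
 */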

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}
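
/*
 * Usage sketch for the coroutine scheduling helpers, with a hypothetical
 * coroutine_fn my_co_fn(): a coroutine that must temporarily run in another
 * AioContext (for example the main context) can hop there and back:
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         AioContext *home_ctx = qemu_get_current_aio_context();
 *
 *         aio_co_reschedule_self(qemu_get_aio_context());
 *         // ... now running in the main AioContext ...
 *         aio_co_reschedule_self(home_ctx);
 *     }
 *
 * Code that merely needs to wake an existing coroutine from another thread
 * uses aio_co_wake(co) (below) or aio_co_schedule(ctx, co) (above) if the
 * target context is known explicitly.
 */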

void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
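
/*
 * Usage sketch: this setter backs the IOThread "thread-pool-min" /
 * "thread-pool-max" properties; calling it directly looks like this (the
 * bounds 1 and 8 are arbitrary example values):
 *
 *     Error *local_err = NULL;
 *
 *     aio_context_set_thread_pool_params(ctx, 1, 8, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 *
 * If a thread pool already exists for the context, its limits are updated
 * immediately; otherwise the new bounds take effect when
 * aio_get_thread_pool() first creates the pool.
 */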