/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/main-loop.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "aio-posix.h"

/* Stop userspace polling on a handler if it isn't active for some time */
#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)

bool aio_poll_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->poll_disable_cnt);
}

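/*
 * Note (added for clarity): fdmon_supports_polling() below distinguishes fd
 * monitoring implementations by whether their FDMonOps ->need_wait() callback
 * is exactly this function, i.e. whether a blocking wait syscall is required
 * only when ->io_poll() polling has been disabled via poll_disable_cnt.
 */
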
void aio_add_ready_handler(AioHandlerList *ready_list,
                           AioHandler *node,
                           int revents)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->pfd.revents = revents;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

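/*
 * Note (added for clarity): FDMonOps ->wait() implementations call
 * aio_add_ready_handler() to queue handlers whose fds became ready;
 * aio_poll() later drains the list via aio_dispatch_ready_handlers().
 */
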
static void aio_add_poll_ready_handler(AioHandlerList *ready_list,
                                       AioHandler *node)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->poll_ready = true;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd) {
            if (!QLIST_IS_INSERTED(node, node_deleted)) {
                return node;
            }
        }
    }

    return NULL;
}

static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    node->pfd.revents = 0;
    node->poll_ready = false;

    /* If the fd monitor has already marked it deleted, leave it alone */
    if (QLIST_IS_INSERTED(node, node_deleted)) {
        return false;
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_SAFE_REMOVE(node, node_poll);
    QLIST_REMOVE(node, node);
    return true;
}

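/*
 * Note (added for clarity): the deferred path matters because a nested
 * caller may still be walking ctx->aio_handlers under a list_lock reader
 * reference; freeing the node immediately would leave that walker with a
 * dangling pointer, so the node is only queued on deleted_aio_handlers here.
 */
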
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    if (io_poll && !io_poll_ready) {
        io_poll = NULL; /* polling only makes sense if there is a handler */
    }

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clean events in order to unregister fd from the ctx epoll. */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            is_new = true;
        }
        /* Alloc and insert if it's not already there */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->io_poll_ready = io_poll_ready;
        new_node->opaque = opaque;
        new_node->is_external = is_external;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    qatomic_set(&ctx->poll_disable_cnt,
                qatomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    ctx->fdmon_ops->update(ctx, node, new_node);
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}

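/*
 * Usage sketch (illustrative, not part of the original file): to watch a
 * socket for readability with a hypothetical my_read_cb() callback:
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb,
 *                        NULL, NULL, NULL, my_state);
 *
 * Passing NULL for io_read, io_write and io_poll unregisters the handler:
 *
 *     aio_set_fd_handler(ctx, sockfd, true, NULL, NULL, NULL, NULL, NULL);
 */
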
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll,
                       (IOHandler *)io_poll_ready, notifier);
}

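/*
 * Usage sketch (illustrative): callers typically register an EventNotifier
 * with both a ready handler and a polling callback (names hypothetical):
 *
 *     aio_set_event_notifier(ctx, &notifier, false, my_notifier_read,
 *                            my_poll, my_poll_ready);
 */
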
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}

static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
                             bool started)
{
    AioHandler *node;
    bool progress = false;

    if (started == ctx->poll_started) {
        return false;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
        IOHandler *fn;

        if (QLIST_IS_INSERTED(node, node_deleted)) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }

        /* Poll one last time in case ->io_poll_end() raced with the event */
        if (!started && node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);
            progress = true;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return progress;
}

bool aio_prepare(AioContext *ctx)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);

    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, &ready_list, false);
    /* TODO what to do with this list? */

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        /* TODO should this check poll ready? */
        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

static void aio_free_deleted_handlers(AioContext *ctx)
{
    AioHandler *node;

    if (QLIST_EMPTY_RCU(&ctx->deleted_aio_handlers)) {
        return;
    }
    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        return; /* we are nested, let the parent do the freeing */
    }

    while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
        QLIST_REMOVE(node, node);
        QLIST_REMOVE(node, node_deleted);
        QLIST_SAFE_REMOVE(node, node_poll);
        g_free(node);
    }

    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}

static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
{
    bool progress = false;
    bool poll_ready;
    int revents;

    revents = node->pfd.revents & node->pfd.events;
    node->pfd.revents = 0;

    poll_ready = node->poll_ready;
    node->poll_ready = false;

    /*
     * Start polling AioHandlers when they become ready because activity is
     * likely to continue.  Note that starvation is theoretically possible when
     * fdmon_supports_polling(), but only until the fd fires for the first
     * time.
     */
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        !QLIST_IS_INSERTED(node, node_poll) &&
        node->io_poll) {
        trace_poll_add(ctx, node, node->pfd.fd, revents);
        if (ctx->poll_started && node->io_poll_begin) {
            node->io_poll_begin(node->opaque);
        }
        QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        poll_ready && revents == 0 &&
        aio_node_check(ctx, node->is_external) &&
        node->io_poll_ready) {
        node->io_poll_ready(node->opaque);

        /*
         * Return early since revents was zero. aio_notify() does not count as
         * progress.
         */
        return node->opaque != &ctx->notifier;
    }

    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
        aio_node_check(ctx, node->is_external) &&
        node->io_read) {
        node->io_read(node->opaque);

        /* aio_notify() does not count as progress */
        if (node->opaque != &ctx->notifier) {
            progress = true;
        }
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_OUT | G_IO_ERR)) &&
        aio_node_check(ctx, node->is_external) &&
        node->io_write) {
        node->io_write(node->opaque);
        progress = true;
    }

    return progress;
}

/*
 * If we have a list of ready handlers then this is more efficient than
 * scanning all handlers with aio_dispatch_handlers().
 */
static bool aio_dispatch_ready_handlers(AioContext *ctx,
                                        AioHandlerList *ready_list)
{
    bool progress = false;
    AioHandler *node;

    while ((node = QLIST_FIRST(ready_list))) {
        QLIST_REMOVE(node, node_ready);
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

/* Slower than aio_dispatch_ready_handlers() but only used via glib */
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    aio_free_deleted_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

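/*
 * Note (added for clarity): aio_dispatch() is the path used when the
 * AioContext is driven as a glib GSource; the GSource dispatch callback in
 * util/async.c invokes it after glib has polled the file descriptors.
 */
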
static bool run_poll_handlers_once(AioContext *ctx,
                                   AioHandlerList *ready_list,
                                   int64_t now,
                                   int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;
    AioHandler *tmp;

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);

            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

static bool fdmon_supports_polling(AioContext *ctx)
{
    return ctx->fdmon_ops->need_wait != aio_poll_disabled;
}

static bool remove_idle_poll_handlers(AioContext *ctx,
                                      AioHandlerList *ready_list,
                                      int64_t now)
{
    AioHandler *node;
    AioHandler *tmp;
    bool progress = false;

    /*
     * File descriptor monitoring implementations without userspace polling
     * support suffer from starvation when a subset of handlers is polled
     * because fds will not be processed in a timely fashion.  Don't remove
     * idle poll handlers.
     */
    if (!fdmon_supports_polling(ctx)) {
        return false;
    }

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->poll_idle_timeout == 0LL) {
            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
        } else if (now >= node->poll_idle_timeout) {
            trace_poll_remove(ctx, node, node->pfd.fd);
            node->poll_idle_timeout = 0LL;
            QLIST_SAFE_REMOVE(node, node_poll);
            if (ctx->poll_started && node->io_poll_end) {
                node->io_poll_end(node->opaque);

                /*
                 * Final poll in case ->io_poll_end() races with an event.
                 * Nevermind about re-adding the handler in the rare case where
                 * this causes progress.
                 */
                if (node->io_poll(node->opaque)) {
                    aio_add_poll_ready_handler(ready_list, node);
                    progress = true;
                }
            }
        }
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @ready_list: the list to place ready handlers on
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
                              int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    /*
     * Optimization: ->io_poll() handlers often contain RCU read critical
     * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
     * -> rcu_read_lock() -> ... sequences with expensive memory
     * synchronization primitives.  Make the entire polling loop an RCU
     * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
     * are cheap.
     */
    RCU_READ_LOCK_GUARD();

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, ready_list,
                                          start_time, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));

    if (remove_idle_poll_handlers(ctx, ready_list,
                                  start_time + elapsed_time)) {
        *timeout = 0;
        progress = true;
    }

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);

    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @ready_list: list to add handlers that need to be run
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *           polling succeeds.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
                          int64_t *timeout)
{
    int64_t max_ns;

    if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
        return false;
    }

    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
    if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
        poll_set_started(ctx, ready_list, true);

        if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
            return true;
        }
    }

    if (poll_set_started(ctx, ready_list, false)) {
        *timeout = 0;
        return true;
    }

    return false;
}

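/*
 * Worked example (illustrative, not from the original file): with
 * ctx->poll_ns == 32768 and the next timer due in 1 ms, max_ns becomes
 * qemu_soonest_timeout(1000000, 32768) == 32768 ns of busy polling. When
 * polling succeeds, *timeout is forced to 0 so the subsequent ->wait() call
 * in aio_poll() does not block.
 */
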
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
    bool progress;
    bool use_notify_me;
    int64_t timeout;
    int64_t start = 0;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &ready_list, &timeout);
    assert(!(timeout && progress));

    /*
     * aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    use_notify_me = timeout != 0;
    if (use_notify_me) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before reading ctx->notified.  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();

        /* Don't block if aio_notify() was called */
        if (qatomic_read(&ctx->notified)) {
            timeout = 0;
        }
    }

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
        ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
    }

    if (use_notify_me) {
        /* Finish the poll before clearing the flag.  */
        qatomic_store_release(&ctx->notify_me,
                              qatomic_read(&ctx->notify_me) - 2);
    }

    aio_notify_accept(ctx);

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    progress |= aio_bh_poll(ctx);
    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);

    aio_free_deleted_handlers(ctx);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

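/*
 * Usage sketch (illustrative, not from the original file): an IOThread-style
 * event loop drives this function directly in blocking mode:
 *
 *     while (!qatomic_read(&stopping)) {
 *         aio_poll(ctx, true);
 *     }
 */
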
void aio_context_setup(AioContext *ctx)
{
    ctx->fdmon_ops = &fdmon_poll_ops;
    ctx->epollfd = -1;

    /* Use the fastest fd monitoring implementation if available */
    if (fdmon_io_uring_setup(ctx)) {
        return;
    }

    fdmon_epoll_setup(ctx);
}

void aio_context_destroy(AioContext *ctx)
{
    fdmon_io_uring_destroy(ctx);
    fdmon_epoll_disable(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_use_g_source(AioContext *ctx)
{
    /*
     * Disable io_uring when the glib main loop is used because it doesn't
     * support mixed glib/aio_poll() usage. It relies on aio_poll() being
     * called regularly so that changes to the monitored file descriptors are
     * submitted, otherwise a list of pending fd handlers builds up.
     */
    fdmon_io_uring_destroy(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}

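/*
 * Example (illustrative): enable adaptive polling with a 32 microsecond
 * ceiling and default grow/shrink behavior, as the IOThread "poll-max-ns"
 * property does:
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 0, 0, &error_abort);
 */
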
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
    /*
     * No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->aio_max_batch = max_batch;

    aio_notify(ctx);
}

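/*
 * Example (illustrative): cap Linux AIO request batching at 32 requests, as
 * the IOThread "aio-max-batch" property does; 0 restores the default limit:
 *
 *     aio_context_set_aio_params(ctx, 32, &error_abort);
 */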