/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace-root.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioPollFn *io_poll;
    IOHandler *io_poll_begin;
    IOHandler *io_poll_end;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};
#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64
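
/*
 * aio_epoll_disable:
 * Mark epoll as unavailable for this context and, if it was enabled, drop
 * the registered fds by closing the epoll instance.  From here on polling
 * falls back to qemu_poll_ns() for this AioContext.
 */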
static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_available = false;
    if (!ctx->epoll_enabled) {
        return;
    }
    ctx->epoll_enabled = false;
    close(ctx->epollfd);
}
static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}
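
/*
 * aio_epoll_try_enable:
 * Try to register every live handler with the epoll instance.  Returns
 * true and sets ctx->epoll_enabled on success; returns false as soon as
 * any epoll_ctl() call fails.
 */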
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}
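
/*
 * aio_epoll_update:
 * Propagate a handler change to the epoll set: add, modify or delete the
 * fd depending on @is_new and on whether any events remain.  On failure,
 * epoll is disabled for the whole context rather than left inconsistent.
 */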
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}
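
/*
 * aio_epoll:
 * Wait for events via the epoll fd.  A positive timeout is served by first
 * sleeping in qemu_poll_ns() on the epoll fd itself; the ready events are
 * then drained with epoll_wait() and translated back into GLib G_IO_* bits
 * in each handler's pfd.revents.
 */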
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         sizeof(events) / sizeof(events[0]),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}
static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}
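
/*
 * aio_epoll_check_poll:
 * Decide whether this aio_poll() iteration should use epoll.  Once the
 * number of polled fds reaches EPOLL_ENABLE_THRESHOLD, try to switch the
 * context over to epoll; if registration fails, disable epoll for good.
 */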
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}
#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}
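
/*
 * aio_set_fd_handler:
 * Register, update or remove the handlers for @fd.  Passing NULL for
 * io_read, io_write and io_poll removes the handler.  A hypothetical
 * caller, for illustration only (my_read_cb and my_opaque are not part
 * of this file):
 *
 *     aio_set_fd_handler(ctx, fd, true, my_read_cb, NULL, NULL, my_opaque);
 */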
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }

        g_source_remove_poll(&ctx->source, &node->pfd);

        /* If the lock is held, just mark the node as deleted */
        if (qemu_lockcnt_count(&ctx->list_lock)) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up while
             * no one is walking the handlers list.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
        }

        if (!node->io_poll) {
            ctx->poll_disable_cnt--;
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;

            ctx->poll_disable_cnt += !io_poll;
        } else {
            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
        }

        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_poll = io_poll;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}
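
/*
 * aio_set_fd_poll:
 * Attach begin/end callbacks that bracket busy-poll mode for an fd that
 * already has a handler; a no-op if @fd is not registered.
 */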
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}
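
/*
 * poll_set_started:
 * Enter or leave busy-poll mode for the whole context, invoking each
 * handler's io_poll_begin or io_poll_end callback so devices can adjust
 * their notification strategy while QEMU polls (e.g. virtio devices can
 * suppress guest notifications for the duration).
 */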
static void poll_set_started(AioContext *ctx, bool started)
{
    AioHandler *node;

    if (started == ctx->poll_started) {
        return;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        IOHandler *fn;

        if (node->deleted) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);
}
bool aio_prepare(AioContext *ctx)
{
    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, false);

    return false;
}
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return progress;
}
/*
 * Note that dispatch_fds == false has the side-effect of postponing the
 * freeing of deleted handlers.
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
    bool progress;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    progress = aio_bh_poll(ctx);

    if (dispatch_fds) {
        progress |= aio_dispatch_handlers(ctx);
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}
/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;
static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}
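
/*
 * add_pollfd:
 * Append @node to the thread-local pollfds/nodes arrays, growing them
 * geometrically on demand; the cleanup notifier frees them at thread exit.
 */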
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}
static bool run_poll_handlers_once(AioContext *ctx)
{
    bool progress = false;
    AioHandler *node;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
            aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            progress = true;
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}
/* run_poll_handlers:
 * @ctx: the AioContext
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
{
    bool progress;
    int64_t end_time;

    assert(ctx->notify_me);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
    assert(ctx->poll_disable_cnt == 0);

    trace_run_poll_handlers_begin(ctx, max_ns);

    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;

    do {
        progress = run_poll_handlers_once(ctx);
    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);

    trace_run_poll_handlers_end(ctx, progress);

    return progress;
}
/* try_poll_mode:
 * @ctx: the AioContext
 * @blocking: busy polling is only attempted when blocking is true
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, bool blocking)
{
    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
        /* See qemu_soonest_timeout() uint64_t hack */
        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
                             (uint64_t)ctx->poll_ns);

        if (max_ns) {
            poll_set_started(ctx, true);

            if (run_poll_handlers(ctx, max_ns)) {
                return true;
            }
        }
    }

    poll_set_started(ctx, false);

    /* Even if we don't run busy polling, try polling once in case it can make
     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
     */
    return run_poll_handlers_once(ctx);
}
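
/*
 * aio_poll:
 * One iteration of the event loop: try busy polling first; if that makes
 * no progress, build the pollfd array and block in ppoll or epoll, adapt
 * ctx->poll_ns based on how long the wait actually took, and finally
 * dispatch bottom halves, fd handlers and timers.
 */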
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i;
    int ret = 0;
    bool progress;
    int64_t timeout;
    int64_t start = 0;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    if (try_poll_mode(ctx, blocking)) {
        progress = true;
    } else {
        assert(npfd == 0);

        /* fill pollfds */
        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
                }
            }
        }

        timeout = blocking ? aio_compute_timeout(ctx) : 0;
        /* wait until next event */
        if (timeout) {
            aio_context_release(ctx);
        }
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;

            epoll_handler.pfd.fd = ctx->epollfd;
            epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
            npfd = 0;
            add_pollfd(&epoll_handler);
            ret = aio_epoll(ctx, pollfds, npfd, timeout);
        } else {
            ret = qemu_poll_ns(pollfds, npfd, timeout);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }
    }
    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
    }

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }
    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    qemu_lockcnt_dec(&ctx->list_lock);

    /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx, ret > 0)) {
        progress = true;
    }

    aio_context_release(ctx);

    return progress;
}
void aio_context_setup(AioContext *ctx)
{
    /* TODO remove this in final patch submission */
    if (getenv("QEMU_AIO_POLL_MAX_NS")) {
        fprintf(stderr, "The QEMU_AIO_POLL_MAX_NS environment variable has "
                "been replaced with -object iothread,poll-max-ns=NUM\n");
        exit(1);
    }

#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}
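
/*
 * aio_context_set_poll_params:
 * Set the busy-poll tuning knobs: @max_ns caps the adaptive polling time
 * (0 disables polling); @grow and @shrink are the factors by which
 * aio_poll() multiplies or divides ctx->poll_ns when adapting.
 */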
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}