/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif
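
/*
 * One AioHandler tracks a single registered file descriptor: its GLib poll
 * descriptor, the read/write/poll callbacks and their opaque argument, and
 * its membership in the RCU-protected ctx->aio_handlers list.
 */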
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioPollFn *io_poll;
    IOHandler *io_poll_begin;
    IOHandler *io_poll_end;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64
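/*
 * Rationale (informal): each ppoll(2) call passes every registered fd to
 * the kernel, so its cost grows with the total number of handlers, while
 * epoll pays mostly per ready fd; past this threshold epoll tends to win.
 */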

static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_enabled = false;
    if (!ctx->epoll_available) {
        return;
    }
    ctx->epoll_available = false;
    close(ctx->epollfd);
}

static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }

    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}
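
/*
 * Wait for events with epoll.  epoll_wait(2) only has millisecond
 * resolution, so for a positive timeout qemu_poll_ns() waits on the epoll
 * fd first with nanosecond precision, and epoll_wait() then merely
 * collects the ready events.
 */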
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         ARRAY_SIZE(events),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_REMOVE(node, node);
    return true;
}
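
/*
 * Register, replace or remove the handlers for a file descriptor.  Passing
 * NULL for all of io_read/io_write/io_poll removes the handler.  The
 * aio_handlers list is RCU-protected, so concurrent walkers see either the
 * old node or its replacement, and nodes are freed only when no reader can
 * still hold a reference.
 */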
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clean events in order to unregister fd from the ctx epoll. */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
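        /*
         * Worked example for the line above: replacing a handler that had
         * io_poll with one that does not gives
         * !io_poll - (node && !node->io_poll) = 1 - 0 = +1, i.e. one more
         * registered handler now forbids polling.
         */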
        if (node == NULL) {
            is_new = true;
        }
        /* Alloc and insert if it's not already there */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->opaque = opaque;
        new_node->is_external = is_external;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    atomic_set(&ctx->poll_disable_cnt,
               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    if (new_node) {
        aio_epoll_update(ctx, new_node, is_new);
    } else if (node) {
        /* Unregister deleted fd_handler */
        aio_epoll_update(ctx, node, false);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}
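
/*
 * Illustrative use (hypothetical caller, not part of this file): have
 * aio_poll() invoke a callback whenever a socket becomes readable:
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb, NULL, NULL, my_state);
 *
 * my_read_cb(my_state) then runs during the dispatch phase of aio_poll().
 */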

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}
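
/*
 * Tell every handler whether a busy-polling phase is starting or ending,
 * so that e.g. a device can suppress its event notifications while its
 * io_poll callback is being invoked directly.
 */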
static void poll_set_started(AioContext *ctx, bool started)
{
    AioHandler *node;

    if (started == ctx->poll_started) {
        return;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        IOHandler *fn;

        if (node->deleted) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);
}

bool aio_prepare(AioContext *ctx)
{
    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, false);

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}
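
/*
 * Run one polling iteration: invoke every io_poll callback once.  A callback
 * returns true when it found work without a system call; that zeroes
 * *timeout so the caller can skip the blocking wait entirely.
 */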
static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
            aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(ctx->notify_me);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !atomic_read(&ctx->poll_disable_cnt));

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);
    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *    polling succeeds.
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
    int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);

    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
        poll_set_started(ctx, true);

        if (run_poll_handlers(ctx, max_ns, timeout)) {
            return true;
        }
    }

    poll_set_started(ctx, false);

    /* Even if we don't run busy polling, try polling once in case it can make
     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
     */
    return run_poll_handlers_once(ctx, timeout);
}
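
/*
 * The main event loop primitive: optionally busy-poll, then block in
 * ppoll(2)/epoll_wait(2), adapt the polling window to the observed wait
 * time, and finally dispatch bottom halves, fd handlers and timers.
 */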
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i;
    int ret = 0;
    bool progress;
    int64_t timeout;
    int64_t start = 0;

    assert(in_aio_context_home_thread(ctx));

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &timeout);
    assert(!(timeout && progress));

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
        assert(npfd == 0);

        /* fill pollfds */
        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
                }
            }
        }

        /* wait until next event */
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;

            epoll_handler.pfd.fd = ctx->epollfd;
            epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
            npfd = 0; /* pollfds[] is not being used */
            add_pollfd(&epoll_handler);
            ret = aio_epoll(ctx, pollfds, npfd, timeout);
        } else {
            ret = qemu_poll_ns(pollfds, npfd, timeout);
        }
    }

    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
        aio_notify_accept(ctx);
    }

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }
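
    /*
     * Illustrative numbers for the self-tuning above: with poll_shrink == 2
     * and poll_ns == 8000, a wait longer than poll_max_ns halves poll_ns to
     * 4000; with poll_grow unset (defaulting to 2), a wait between poll_ns
     * and poll_max_ns doubles it again, capped at poll_max_ns.
     */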

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;

    progress |= aio_bh_poll(ctx);

    if (ret > 0) {
        progress |= aio_dispatch_handlers(ctx);
    }

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

void aio_context_setup(AioContext *ctx)
{
#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}

void aio_context_destroy(AioContext *ctx)
{
#ifdef CONFIG_EPOLL_CREATE1
    aio_epoll_disable(ctx);
#endif
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}