/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif
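
/* One AioHandler is allocated per file descriptor registered on an
 * AioContext.  The embedded GPollFD carries the fd plus the requested and
 * returned event bits, so the same node can be handed both to glib (via
 * g_source_add_poll()) and to qemu_poll_ns().  Nodes live on
 * ctx->aio_handlers; "deleted" marks nodes that must be reclaimed lazily
 * because the list is currently being walked.
 */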
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64
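
/* Give up on epoll for this context.  Clearing epoll_available guarantees
 * we never try to re-enable it; if epoll was actually in use, the epoll
 * instance is torn down as well and polling falls back to qemu_poll_ns().
 */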
static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_available = false;
    if (!ctx->epoll_enabled) {
        return;
    }
    ctx->epoll_enabled = false;
    close(ctx->epollfd);
}
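
/* Translate GLib poll flags into the corresponding epoll event bits.  The
 * two flag sets have matching semantics but different values, so the
 * mapping has to be done bit by bit (and in reverse in aio_epoll()).
 */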
static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}
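
/* Register every live handler with the epoll instance.  A single
 * EPOLL_CTL_ADD failure makes the whole attempt fail, in which case the
 * caller is expected to disable epoll and stay on the poll-based path.
 */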
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}
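
/* Mirror a handler change into the epoll set: remove the fd when its event
 * mask became empty, add it when the node is new, modify it otherwise.
 * Any epoll_ctl() failure is treated as fatal for the epoll fast path and
 * demotes the context back to qemu_poll_ns() via aio_epoll_disable().
 */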
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, node->pfd.fd, &event);
        if (r) {
            aio_epoll_disable(ctx);
        }
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        if (is_new) {
            r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
            if (r) {
                aio_epoll_disable(ctx);
            }
        } else {
            r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, node->pfd.fd, &event);
            if (r) {
                aio_epoll_disable(ctx);
            }
        }
    }
}
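
/* Wait for events using the epoll instance.  The caller passes exactly one
 * GPollFD, wrapping ctx->epollfd.  For a positive timeout the nanosecond-
 * precision wait happens in qemu_poll_ns() on that fd and epoll_wait()
 * then merely collects the ready events; epoll_wait() itself only blocks
 * in the timeout <= 0 cases (non-blocking or infinite), where its coarser
 * millisecond granularity does not matter.
 */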
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         sizeof(events) / sizeof(events[0]),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}
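
/* Decide whether this aio_poll() iteration may use epoll.  Once the number
 * of polled fds reaches EPOLL_ENABLE_THRESHOLD we try to switch over; if
 * enabling fails, epoll is disabled for good and the context keeps using
 * qemu_poll_ns().
 */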
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}
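
/* Register or update the read/write handlers for @fd, or unregister it when
 * both handlers are NULL.  While the handler list is being walked, removal
 * only marks the node as deleted and the walker frees it afterwards.  The
 * epoll set is kept in sync, and aio_notify() kicks any concurrent
 * aio_poll() so that the change takes effect immediately.
 */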
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                deleted = true;
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    aio_notify(ctx);
    if (deleted) {
        g_free(node);
    }
}
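
/* Usage sketch (hypothetical caller, not part of this file): a component
 * owning a socket registers a read handler and later tears it down by
 * passing NULL callbacks:
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, true, NULL, NULL, NULL);
 *
 * where my_read_cb/my_state stand for the caller's own IOHandler and
 * opaque pointer, and "true" marks the fd as external, i.e. subject to
 * aio_disable_external().
 */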

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       is_external, (IOHandler *)io_read, NULL, notifier);
}

bool aio_prepare(AioContext *ctx)
{
    return false;
}
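
/* Return true if a previous poll left events that still need dispatching:
 * some fd became readable or writable and a matching handler is installed.
 */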
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}
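
/* Run queued bottom halves, invoke handlers for fds whose revents were
 * filled in by the last poll, and fire expired timers.  Returns true if
 * any real work was done; the context's own event notifier is excluded
 * because waking up the loop is not progress by itself.
 */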
bool aio_dispatch(AioContext *ctx)
{
    AioHandler *node;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}
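
/* Append one handler to the thread-local pollfds/nodes arrays, growing
 * them geometrically (8, 16, 32, ...) so that repeated aio_poll() calls
 * do not reallocate.  The first growth also registers the thread-exit
 * cleanup hook above.
 */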
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}
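
/* One iteration of the event loop for @ctx: collect pollable fds, wait
 * (via epoll or qemu_poll_ns) for at most the timer-derived timeout, then
 * dispatch fd handlers, bottom halves and timers.  The context lock is
 * dropped around a blocking wait so that other threads can acquire it.
 */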
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i, ret;
    bool progress;
    int64_t timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    ctx->walking_handlers++;

    assert(npfd == 0);

    /* fill pollfds */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->pfd.events
            && !aio_epoll_enabled(ctx)
            && aio_node_check(ctx, node->is_external)) {
            add_pollfd(node);
        }
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;

    /* wait until next event */
    if (timeout) {
        aio_context_release(ctx);
    }
    if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
        AioHandler epoll_handler;

        epoll_handler.pfd.fd = ctx->epollfd;
        epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
        npfd = 0;
        add_pollfd(&epoll_handler);
        ret = aio_epoll(ctx, pollfds, npfd, timeout);
    } else {
        ret = qemu_poll_ns(pollfds, npfd, timeout);
    }
    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
    }
    if (timeout) {
        aio_context_acquire(ctx);
    }

    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    ctx->walking_handlers--;

    /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx)) {
        progress = true;
    }

    aio_context_release(ctx);

    return progress;
}
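
/* Called when an AioContext is created.  A failing epoll_create1() is not
 * an error: epoll_available simply stays false and the context uses
 * qemu_poll_ns() exclusively.
 */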
void aio_context_setup(AioContext *ctx, Error **errp)
{
#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}