/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    int deleted;
    int pollfds_idx;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};
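/*
 * One AioHandler is kept per registered fd on ctx->aio_handlers.  While
 * ctx->walking_handlers is non-zero a node may not be freed, only marked
 * ->deleted; it is unlinked and freed later, once no walk is in progress
 * (see aio_set_fd_handler() and aio_dispatch() below).
 */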
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd) {
            if (!node->deleted) {
                return node;
            }
        }
    }

    return NULL;
}
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->pollfds_idx = -1;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_notify(ctx);
}
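/*
 * Usage sketch (the callback and state names here are hypothetical, not
 * part of this file):
 *
 *     static void my_read_cb(void *opaque) { ... }
 *
 *     aio_set_fd_handler(ctx, fd, my_read_cb, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL);    <- unregister
 *
 * Passing NULL for both io_read and io_write removes the handler, as the
 * first branch above shows.
 */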
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL, notifier);
}
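/*
 * Minimal sketch of the notifier path (assumes the standard EventNotifier
 * helpers; error handling omitted; my_notifier_cb is hypothetical):
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, my_notifier_cb);
 *     event_notifier_set(&e);    <- wakes a blocking aio_poll()
 *
 * Because the notifier is passed as the opaque pointer, the callback gets
 * its own EventNotifier back as the argument.
 */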
bool aio_prepare(AioContext *ctx)
{
    return false;
}
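/*
 * Nothing needs to be prepared before polling on POSIX; the hook exists
 * so every aio backend presents the same interface (the win32 backend,
 * for instance, does real work in its aio_prepare()).
 */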
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }

        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}
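/*
 * aio_pending() reports whether aio_dispatch() would invoke at least one
 * callback; QEMU's glib integration presumably calls it from the GSource
 * check function to decide whether a dispatch pass is needed.
 */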
bool aio_dispatch(AioContext *ctx)
{
    AioHandler *node;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}
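/*
 * Note on the walk above: walking_handlers is raised around each callback,
 * so a callback may safely call aio_set_fd_handler() on any fd, including
 * its own.  Removal then degrades to setting ->deleted, and the node is
 * freed here once the counter drops back to zero.
 */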
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    bool was_dispatching;
    int ret;
    bool progress;

    was_dispatching = ctx->dispatching;
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns.
     *
     * If we're in a nested event loop, ctx->dispatching might be true.
     * In that case we can restore it just before returning, but we
     * have to clear it now.
     */
    aio_set_dispatching(ctx, !blocking);

    ctx->walking_handlers++;

    g_array_set_size(ctx->pollfds, 0);

    /* fill pollfds */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        node->pollfds_idx = -1;
        if (!node->deleted && node->pfd.events) {
            GPollFD pfd = {
                .fd = node->pfd.fd,
                .events = node->pfd.events,
            };
            node->pollfds_idx = ctx->pollfds->len;
            g_array_append_val(ctx->pollfds, pfd);
        }
    }

    ctx->walking_handlers--;

    /* wait until next event */
    ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
                       ctx->pollfds->len,
                       blocking ? aio_compute_timeout(ctx) : 0);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            if (node->pollfds_idx != -1) {
                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
                                              node->pollfds_idx);
                node->pfd.revents = pfd->revents;
            }
        }
    }

    /* Run dispatch even if there were no readable fds to run timers */
    aio_set_dispatching(ctx, true);
    if (aio_dispatch(ctx)) {
        progress = true;
    }

    aio_set_dispatching(ctx, was_dispatching);
    return progress;
}
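/*
 * Typical caller pattern (a sketch; "done" stands for a hypothetical
 * completion flag set by one of the registered callbacks):
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 *
 * With blocking == false the poll timeout is 0, so aio_poll() doubles as
 * a non-blocking "dispatch whatever is already ready" call.
 */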