/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

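/*
 * A node serves one of two roles: a socket handler (io_read/io_write,
 * polled with select() and armed with WSAEventSelect()) or an
 * EventNotifier handler (io_notify, waited on via its HANDLE with
 * WaitForMultipleObjects()).  Both kinds live on the same aio_handlers
 * list so registration, dispatch and removal share one RCU-style walk.
 */
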
static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /*
     * If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If aio_poll is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
    } else {
        /* Otherwise, delete it for real.  We can't just mark it as
         * deleted because deleted nodes are only cleaned up after
         * releasing the list_lock.
         */
        QLIST_REMOVE(node, node);
        g_free(node);
    }
}

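/*
 * Illustrative sketch (not part of the original file) of the list_lock
 * discipline that makes the deferred deletion above safe: readers bump
 * the lockcnt before walking, so a writer can tell a walk is in progress
 * and must only mark nodes as deleted instead of freeing them.
 *
 *     qemu_lockcnt_inc(&ctx->list_lock);
 *     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
 *         ...   // nodes cannot be freed while the count is nonzero
 *     }
 *     qemu_lockcnt_dec(&ctx->list_lock);
 */
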
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *old_node;
    AioHandler *node = NULL;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
        if (old_node->pfd.fd == fd && !old_node->deleted) {
            break;
        }
    }

    if (io_read || io_write) {
        HANDLE event;
        long bitmask = 0;

        /* Alloc and insert if it's not already there */
        node = g_new0(AioHandler, 1);
        node->pfd.fd = fd;

        /* node was just zeroed, so test the new callbacks rather than
         * the (still NULL) node fields.
         */
        node->pfd.events = 0;
        if (io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }
    if (old_node) {
        aio_remove_fd_handler(ctx, old_node);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

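/*
 * Example usage (hypothetical caller, not part of this file): register a
 * read handler on a connected SOCKET, then unregister it by passing NULL
 * callbacks.  "my_read_ready", "MyState" and "s" are made-up names.
 *
 *     static void my_read_ready(void *opaque)
 *     {
 *         MyState *s = opaque;   // hypothetical state type
 *         ...                    // drain the socket here
 *     }
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_ready, NULL, NULL, s);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, true, NULL, NULL, NULL, NULL);
 */
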
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            aio_remove_fd_handler(ctx, node);
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

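/*
 * Example usage (hypothetical, not part of this file): attach a callback
 * to an EventNotifier so that event_notifier_set() from any thread wakes
 * this context.  "my_notifier" and "my_notify_cb" are made-up names.
 *
 *     static void my_notify_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         ...                    // handle the wakeup
 *     }
 *
 *     event_notifier_init(&my_notifier, 0);
 *     aio_set_event_notifier(ctx, &my_notifier, false, my_notify_cb, NULL);
 */
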
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

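/*
 * Why select() here rather than the WSAEventSelect() event alone:
 * Winsock network events are edge-like (e.g. FD_READ is re-armed only by
 * a recv() call), so the zero-timeout select() above recovers the
 * level-triggered readiness and records it in pfd.revents for
 * aio_pending() and aio_dispatch_handlers() to consume.
 */
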
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

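/*
 * Two kinds of work are dispatched above: io_notify callbacks, keyed by
 * the HANDLE that WaitForMultipleObjects() reported, and socket
 * io_read/io_write callbacks, keyed by the revents collected by
 * aio_prepare().  Deleted nodes are freed here only when this walker is
 * the last one, i.e. when qemu_lockcnt_dec_if_lock() succeeds.
 */
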
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            qatomic_store_release(&ctx->notify_me,
                                  qatomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

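/*
 * Illustrative sketch (hypothetical, not part of this file) of a thread
 * driving this context, in the style of QEMU's IOThreads; "done" is a
 * made-up termination flag:
 *
 *     while (!qatomic_read(&done)) {
 *         aio_poll(ctx, true);   // block until a handler, BH or timer runs
 *     }
 *
 * The return value reports whether progress was made, which busy-wait
 * helpers such as AIO_WAIT_WHILE() rely on.
 */
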
void aio_context_setup(AioContext *ctx)
{
}

void aio_context_destroy(AioContext *ctx)
{
}

void aio_context_use_g_source(AioContext *ctx)
{
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    if (max_ns) {
        error_setg(errp, "AioContext polling is not implemented on Windows");
    }
}