/*
 * [extraction residue — gitweb page header, not part of this file:
 *  commit subject "Fix off-by-1 error in RAM migration code",
 *  repo qemu/agraf.git, path qemu-aio.h,
 *  blob 3889fe97a4b35f8fba246ffb662a6e781950c8bf]
 */
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
14 #ifndef QEMU_AIO_H
15 #define QEMU_AIO_H
17 #include "qemu-common.h"
18 #include "qemu-queue.h"
19 #include "event_notifier.h"
21 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
22 typedef void BlockDriverCompletionFunc(void *opaque, int ret);
24 typedef struct AIOCBInfo {
25 void (*cancel)(BlockDriverAIOCB *acb);
26 size_t aiocb_size;
27 } AIOCBInfo;
struct BlockDriverAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockDriverCompletionFunc *cb;
    void *opaque;
36 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
37 BlockDriverCompletionFunc *cb, void *opaque);
38 void qemu_aio_release(void *p);
40 typedef struct AioHandler AioHandler;
41 typedef void QEMUBHFunc(void *opaque);
42 typedef void IOHandler(void *opaque);
44 typedef struct AioContext {
45 GSource source;
47 /* The list of registered AIO handlers */
48 QLIST_HEAD(, AioHandler) aio_handlers;
50 /* This is a simple lock used to protect the aio_handlers list.
51 * Specifically, it's used to ensure that no callbacks are removed while
52 * we're walking and dispatching callbacks.
54 int walking_handlers;
56 /* Anchor of the list of Bottom Halves belonging to the context */
57 struct QEMUBH *first_bh;
59 /* A simple lock used to protect the first_bh list, and ensure that
60 * no callbacks are removed while we're walking and dispatching callbacks.
62 int walking_bh;
64 /* Used for aio_notify. */
65 EventNotifier notifier;
66 } AioContext;
68 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
69 typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
71 /**
72 * aio_context_new: Allocate a new AioContext.
74 * AioContext provide a mini event-loop that can be waited on synchronously.
75 * They also provide bottom halves, a service to execute a piece of code
76 * as soon as possible.
78 AioContext *aio_context_new(void);
80 /**
81 * aio_context_ref:
82 * @ctx: The AioContext to operate on.
84 * Add a reference to an AioContext.
86 void aio_context_ref(AioContext *ctx);
88 /**
89 * aio_context_unref:
90 * @ctx: The AioContext to operate on.
92 * Drop a reference to an AioContext.
94 void aio_context_unref(AioContext *ctx);
96 /**
97 * aio_bh_new: Allocate a new bottom half structure.
99 * Bottom halves are lightweight callbacks whose invocation is guaranteed
100 * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
101 * is opaque and must be allocated prior to its use.
103 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
106 * aio_notify: Force processing of pending events.
108 * Similar to signaling a condition variable, aio_notify forces
109 * aio_wait to exit, so that the next call will re-examine pending events.
110 * The caller of aio_notify will usually call aio_wait again very soon,
111 * or go through another iteration of the GLib main loop. Hence, aio_notify
112 * also has the side effect of recalculating the sets of file descriptors
113 * that the main loop waits for.
115 * Calling aio_notify is rarely necessary, because for example scheduling
116 * a bottom half calls it already.
118 void aio_notify(AioContext *ctx);
121 * aio_bh_poll: Poll bottom halves for an AioContext.
123 * These are internal functions used by the QEMU main loop.
125 int aio_bh_poll(AioContext *ctx);
128 * qemu_bh_schedule: Schedule a bottom half.
130 * Scheduling a bottom half interrupts the main loop and causes the
131 * execution of the callback that was passed to qemu_bh_new.
133 * Bottom halves that are scheduled from a bottom half handler are instantly
134 * invoked. This can create an infinite loop if a bottom half handler
135 * schedules itself.
137 * @bh: The bottom half to be scheduled.
139 void qemu_bh_schedule(QEMUBH *bh);
142 * qemu_bh_cancel: Cancel execution of a bottom half.
144 * Canceling execution of a bottom half undoes the effect of calls to
145 * qemu_bh_schedule without freeing its resources yet. While cancellation
146 * itself is also wait-free and thread-safe, it can of course race with the
147 * loop that executes bottom halves unless you are holding the iothread
148 * mutex. This makes it mostly useless if you are not holding the mutex.
150 * @bh: The bottom half to be canceled.
152 void qemu_bh_cancel(QEMUBH *bh);
155 *qemu_bh_delete: Cancel execution of a bottom half and free its resources.
157 * Deleting a bottom half frees the memory that was allocated for it by
158 * qemu_bh_new. It also implies canceling the bottom half if it was
159 * scheduled.
161 * @bh: The bottom half to be deleted.
163 void qemu_bh_delete(QEMUBH *bh);
165 /* Flush any pending AIO operation. This function will block until all
166 * outstanding AIO operations have been completed or cancelled. */
167 void aio_flush(AioContext *ctx);
169 /* Return whether there are any pending callbacks from the GSource
170 * attached to the AioContext.
172 * This is used internally in the implementation of the GSource.
174 bool aio_pending(AioContext *ctx);
176 /* Progress in completing AIO work to occur. This can issue new pending
177 * aio as a result of executing I/O completion or bh callbacks.
179 * If there is no pending AIO operation or completion (bottom half),
180 * return false. If there are pending bottom halves, return true.
182 * If there are no pending bottom halves, but there are pending AIO
183 * operations, it may not be possible to make any progress without
184 * blocking. If @blocking is true, this function will wait until one
185 * or more AIO events have completed, to ensure something has moved
186 * before returning.
188 * If @blocking is false, this function will also return false if the
189 * function cannot make any progress without blocking.
191 bool aio_poll(AioContext *ctx, bool blocking);
193 #ifdef CONFIG_POSIX
194 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
195 typedef int (AioFlushHandler)(void *opaque);
197 /* Register a file descriptor and associated callbacks. Behaves very similarly
198 * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
199 * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
201 * Code that invokes AIO completion functions should rely on this function
202 * instead of qemu_set_fd_handler[2].
204 void aio_set_fd_handler(AioContext *ctx,
205 int fd,
206 IOHandler *io_read,
207 IOHandler *io_write,
208 AioFlushHandler *io_flush,
209 void *opaque);
210 #endif
212 /* Register an event notifier and associated callbacks. Behaves very similarly
213 * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
214 * will be invoked when using either qemu_aio_wait() or qemu_aio_flush().
216 * Code that invokes AIO completion functions should rely on this function
217 * instead of event_notifier_set_handler.
219 void aio_set_event_notifier(AioContext *ctx,
220 EventNotifier *notifier,
221 EventNotifierHandler *io_read,
222 AioFlushEventNotifierHandler *io_flush);
224 /* Return a GSource that lets the main loop poll the file descriptors attached
225 * to this AioContext.
227 GSource *aio_get_g_source(AioContext *ctx);
229 /* Functions to operate on the main QEMU AioContext. */
231 void qemu_aio_flush(void);
232 bool qemu_aio_wait(void);
233 void qemu_aio_set_event_notifier(EventNotifier *notifier,
234 EventNotifierHandler *io_read,
235 AioFlushEventNotifierHandler *io_flush);
237 #ifdef CONFIG_POSIX
238 void qemu_aio_set_fd_handler(int fd,
239 IOHandler *io_read,
240 IOHandler *io_write,
241 AioFlushHandler *io_flush,
242 void *opaque);
243 #endif
245 #endif