/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/rfifolock.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    RFifoLock lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_check and the next call to aio_ctx_dispatch.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* Lock serializing concurrent adders and the deleter of bottom halves */
    QemuMutex bh_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    /* Scheduling this BH forces the event loop to iterate */
    QEMUBH *notify_dummy_bh;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

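/* Creation sketch (illustrative, not part of this header): an AioContext is
 * reference counted, and aio_context_new() returns with one reference held
 * by the caller:
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         // inspect and free local_err
 *     }
 *     aio_context_unref(ctx);    // drop the initial reference
 */
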
/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Access to timers and BHs from a thread that has not acquired the AioContext
 * is possible.  Access to callbacks for now must be done while the AioContext
 * is owned by the thread (FIXME).
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

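/* Ownership sketch for the pattern described above (illustrative; "running"
 * is a hypothetical flag): a thread that runs the event loop and does not
 * want to be interrupted takes ownership around its aio_poll() calls:
 *
 *     aio_context_acquire(ctx);
 *     while (running) {
 *         aio_poll(ctx, true);    // blocking iteration, context owned
 *     }
 *     aio_context_release(ctx);
 */
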
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

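/* Typical bottom-half lifecycle (illustrative sketch; my_bh_cb, MyState and
 * my_state are hypothetical names):
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // deferred work runs in the event loop
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh);    // my_bh_cb runs on the next loop iteration
 *     ...
 *     qemu_bh_delete(bh);      // cancel if pending, then free
 */
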
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently with itself
 * for the same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the memory is actually released later,
 * by the event loop's bottom half processing.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * AIO operations as a result of executing I/O completion or bottom
 * half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

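/* A common pattern built on aio_poll (illustrative sketch): drain all work
 * that is currently pending, without blocking for new events:
 *
 *     aio_context_acquire(ctx);
 *     while (aio_poll(ctx, false)) {
 *         // keep going as long as some handler made progress
 *     }
 *     aio_context_release(ctx);
 */
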
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);

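/* Registration sketch (illustrative; my_read_cb, MyState, my_state and
 * s->fd are hypothetical names):
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // read from s->fd until it would block
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, my_read_cb, NULL, my_state);
 *
 * Passing NULL for both io_read and io_write unregisters the descriptor:
 *
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL);
 */
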
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);

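/* Cross-thread wakeup sketch (illustrative; my_notifier_cb is hypothetical).
 * EventNotifier and its helpers come from "qemu/event_notifier.h", already
 * included above:
 *
 *     static EventNotifier notifier;
 *
 *     static void my_notifier_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);    // acknowledge the wakeup
 *         // handle the event
 *     }
 *
 *     event_notifier_init(&notifier, 0);
 *     aio_set_event_notifier(ctx, &notifier, my_notifier_cb);
 *
 * Another thread may then call event_notifier_set(&notifier) to run
 * my_notifier_cb inside this context's event loop.
 */
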
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

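/* Integration sketch (illustrative): attach the context to the default GLib
 * main context so that g_main_loop_run() services it:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, NULL);    // NULL selects the default GMainContext
 *     g_source_unref(src);           // the main context now holds a reference
 */
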
/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}

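/* Timer sketch (illustrative; my_timer_cb and my_state are hypothetical).
 * timer_mod() and qemu_clock_get_ms() come from "qemu/timer.h", already
 * included above:
 *
 *     static QEMUTimer timer;
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         // runs in the context's event loop when the timer expires
 *     }
 *
 *     aio_timer_init(ctx, &timer, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                    my_timer_cb, my_state);
 *     timer_mod(&timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
 */
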
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

#endif