/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);
typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;
struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
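/* A minimal sketch (not from this file) of how a block driver typically
 * uses these helpers; MyAIOCB and my_aiocb_info are hypothetical names:
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;    (embedded BlockAIOCB, placed first)
 *         int ret;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     ... start the request; on completion invoke acb->common.cb() ...
 *     qemu_aio_unref(acb);
 */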
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);
struct ThreadPool;
struct LinuxAioState;
struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    QemuRecMutex lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;
    /* Lock protecting concurrent addition and deletion of bottom halves */
    QemuMutex bh_lock;
    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};
/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event-loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
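/* A sketch of creating a context and driving it by hand; error handling
 * and surrounding setup are elided:
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     ...
 *     while (aio_poll(ctx, true)) {
 *         ... events were dispatched, poll again ...
 *     }
 *     aio_context_unref(ctx);
 */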
/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);
/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);
/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Access to timers and BHs from a thread that has not acquired AioContext
 * is possible.  Access to callbacks for now must be done while the AioContext
 * is owned by the thread (FIXME).
 */
void aio_context_acquire(AioContext *ctx);
/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
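/* The expected pattern for a thread that shares an AioContext with an
 * IOThread, a sketch:
 *
 *     aio_context_acquire(ctx);
 *     ... access timers, bottom halves and callbacks bound to ctx ...
 *     aio_context_release(ctx);
 */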
/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
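/* Sketch of the usual bottom half lifecycle; my_cb and my_state are
 * hypothetical:
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);      (my_cb(my_state) runs soon afterwards)
 *     ...
 *     qemu_bh_delete(bh);        (cancel and free once no longer needed)
 */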
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);
/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);
/**
 * aio_bh_call: Executes the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);
/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently for the
 * same AioContext.
 */
int aio_bh_poll(AioContext *ctx);
/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);
/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);
/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only actually
 * deleted once the event loop has finished dispatching it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);
/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);
/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 *
 * @dispatch_fds: true to process fds, false to skip them
 *                (can be used as an optimization by callers that know there
 *                are no fds ready)
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
/* Make progress on pending AIO work.  This can issue new pending
 * aio as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
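/* A common pattern is to wait for a flag that a completion callback sets,
 * a sketch ("done" is a hypothetical variable written by the callback):
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */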
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);
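/* Sketch: watch a socket for readability, then unregister it by passing
 * NULL callbacks; my_read_handler and s are hypothetical:
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_handler,
 *                        NULL, NULL, s);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, true, NULL, NULL, NULL, NULL);
 */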
/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);
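/* Sketch, with a hypothetical my_notifier_cb; pass NULL as io_read to
 * unregister the notifier again:
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, my_notifier_cb, NULL);
 */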
/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);
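/* Sketch: attach the context to a GLib main loop; assuming the returned
 * source carries a reference, drop ours once it is attached:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 */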
/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);
/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}
/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}
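/* Sketch of arming a caller-allocated timer to fire 100 ms from now;
 * my_timer_cb and s are hypothetical:
 *
 *     QEMUTimer t;
 *     aio_timer_init(ctx, &t, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                    my_timer_cb, s);
 *     timer_mod(&t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */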
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);
/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}
/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    assert(ctx->external_disable_cnt > 0);
    atomic_dec(&ctx->external_disable_cnt);
}
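/* These two are expected to be used as a bracketing pair, a sketch:
 *
 *     aio_disable_external(ctx);
 *     ... section during which external event sources must not fire ...
 *     aio_enable_external(ctx);
 */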
/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}
/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}
/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);
/**
 * aio_context_in_iothread:
 * @ctx: the aio context
 *
 * Return whether we are running in the I/O thread that manages @ctx.
 */
static inline bool aio_context_in_iothread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}
/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);
/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
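/* Sketch: permit up to 32 microseconds of busy polling (the numbers are
 * illustrative, not recommendations):
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 0, &error_abort);
 */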
#endif