/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H
#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;
struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);
/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
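
/*
 * Example (illustrative sketch): a trivial file descriptor monitoring backend
 * wires its callbacks into one FDMonOps table.  The my_fdmon_* names are
 * hypothetical and the bodies are placeholders; a real backend records fds in
 * ->update() and reports ready handlers from ->wait().
 *
 *     static void my_fdmon_update(AioContext *ctx,
 *                                 AioHandler *old_node, AioHandler *new_node)
 *     {
 *         // record the addition, removal or modification of the fd
 *     }
 *
 *     static int my_fdmon_wait(AioContext *ctx, AioHandlerList *ready_list,
 *                              int64_t timeout)
 *     {
 *         return 0;   // nothing became ready in this trivial sketch
 *     }
 *
 *     static const FDMonOps my_fdmon_ops = {
 *         .update    = my_fdmon_update,
 *         .wait      = my_fdmon_wait,
 *         .need_wait = aio_poll_disabled,  // cannot poll readiness from userspace
 *     };
 */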
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;
struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;
    /*
     * Keeps track of readers and writers of the block layer graph.
     * This is essential to avoid adding or removing nodes and edges of
     * the block graph while some other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;
    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;
    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;
    /* A lock to protect concurrent QEMUBH and AioHandler adders and deleters,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;
    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;
    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;
    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif
    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */
    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};
/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);
/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
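
/*
 * Example (illustrative sketch; "done" is a hypothetical condition): a thread
 * other than the AioContext's home thread takes ownership around its
 * aio_poll() calls so that it is not interrupted while handling events.
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 *     aio_context_release(ctx);
 */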
/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
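
/*
 * Example (illustrative sketch; my_oneshot_cb and my_state are hypothetical):
 * defer a piece of work to the event loop of @ctx.  The bottom half runs once
 * and is freed automatically afterwards.
 *
 *     static void my_oneshot_cb(void *opaque)
 *     {
 *         // runs once in ctx's event loop
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_oneshot_cb, my_state);
 */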
/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);
/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that multiple calls to aio_bh_poll() for the same AioContext
 * must not run concurrently.
 */
int aio_bh_poll(AioContext *ctx);
/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);
/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 *
 * This function is asynchronous: the bottom half is not freed immediately,
 * but only once the event loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
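
/*
 * Example (illustrative sketch; my_bh_cb and my_state are hypothetical):
 * typical lifetime of a recurring bottom half.
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state); // allocate once
 *     qemu_bh_schedule(bh);   // request execution; runs in ctx's event loop
 *     qemu_bh_delete(bh);     // cancel if still pending and release the BH
 */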
/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);
/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
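
/*
 * Example (illustrative sketch): drain all currently pending work without
 * blocking, then wait for the next event.
 *
 *     while (aio_poll(ctx, false)) {
 *         // keep going while non-blocking polls make progress
 *     }
 *     aio_poll(ctx, true);   // block until at least one event is handled
 */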
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
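
/*
 * Example (illustrative sketch; my_read_cb, fd and my_state are hypothetical):
 * register a read handler for a file descriptor, and unregister it later by
 * passing NULL for every callback.
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         // called from aio_poll() when the fd becomes readable
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, false, my_read_cb, NULL, NULL, NULL, my_state);
 *     aio_set_fd_handler(ctx, fd, false, NULL, NULL, NULL, NULL, NULL);
 */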
/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);
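
/*
 * Example (illustrative sketch; my_notifier_cb and my_notifier are
 * hypothetical): attach a handler so that event_notifier_set(&my_notifier)
 * from any thread wakes up this AioContext.
 *
 *     static void my_notifier_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         // handle the notification
 *     }
 *
 *     event_notifier_init(&my_notifier, 0);
 *     aio_set_event_notifier(ctx, &my_notifier, false, my_notifier_cb,
 *                            NULL, NULL);
 */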
/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);
/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}
/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}
/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}
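
/*
 * Example (illustrative sketch; my_timer_cb, my_timer and my_state are
 * hypothetical): initialise a caller-allocated timer and arm it 100 ms from
 * now using timer_mod() and qemu_clock_get_ms() from "qemu/timer.h".
 *
 *     aio_timer_init(ctx, &my_timer, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                    my_timer_cb, my_state);
 *     timer_mod(&my_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */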
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);
/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}
/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);
/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);
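
/*
 * Example (illustrative sketch; my_co_fn and my_state are hypothetical):
 * start a coroutine in a specific AioContext and wake it up again after it
 * has yielded, e.g. while waiting for I/O.
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_state);
 *     aio_co_schedule(ctx, co);   // first entry runs in ctx's event loop
 *
 *     aio_co_wake(co);            // later: resume it where it last ran
 */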
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);
/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);
/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);
/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
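
/*
 * Example (illustrative sketch; the values are arbitrary and &error_abort
 * comes from "qapi/error.h"): tune an AioContext after creation.
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort); // 32 us busy-poll
 *     aio_context_set_aio_params(ctx, 16, &error_abort);               // batch up to 16 requests
 *     aio_context_set_thread_pool_params(ctx, 0, 64, &error_abort);    // 0-64 worker threads
 */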
#endif /* QEMU_AIO_H */