/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"

/* Only used for assertions. */
#include "qemu/coroutine_int.h"

#include <libaio.h>
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 1024

/* Maximum number of requests in a batch (default value). */
#define DEFAULT_MAX_BATCH 32
struct qemu_laiocb {
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* No locking required, only accessed from AioContext home thread */
    LaioQueue io_q;
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};
static void ioq_submit(LinuxAioState *s);

static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}
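/*
 * Worked example (illustrative, assuming the common case where the kernel
 * leaves res2 at zero): a fully completed 4 KiB read reports res == 4096
 * and res2 == 0, so io_event_ret() returns 4096; a failed request reports
 * a negative errno in res (e.g. -EIO), which the OR with a zero high half
 * passes through unchanged.
 */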
/*
 * Completes an AIO request.
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;

    /*
     * If the coroutine is already entered it must be in ioq_submit() and
     * will notice laio->ret has been filled in when it eventually runs
     * later.  Coroutines cannot be entered recursively so avoid doing
     * that!
     */
    assert(laiocb->co->ctx == laiocb->ctx->aio_context);
    if (!qemu_coroutine_entered(laiocb->co)) {
        aio_co_wake(laiocb->co);
    }
}
/**
 * aio_ring buffer which is shared between userspace and kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned    id;    /* kernel internal index number */
    unsigned    nr;    /* number of io_events */
    unsigned    head;  /* Written to by userland or by kernel. */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */

    struct io_event io_events[];
};
/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array (output value)
 *
 * Returns the number of completed events and sets a pointer
 * to the events array.  This function does not update the internal
 * ring buffer, only reads head and tail.  When @events has been
 * processed io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}
/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}
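/*
 * Minimal consumer sketch (illustrative only, not part of this file):
 * peek at the completed events, process them, then commit so the kernel
 * can reuse the ring slots.  handle_event() is a hypothetical callback;
 * the real consumer is io_getevents_advance_and_peek() together with
 * qemu_laio_process_completions() below.
 *
 *     struct io_event *events;
 *     unsigned int i, n;
 *
 *     n = io_getevents_peek(ctx, &events);
 *     for (i = 0; i < n; i++) {
 *         handle_event(&events[i]);
 *     }
 *     io_getevents_commit(ctx, n);
 */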
/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array (output value)
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of elements
 * left to process.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}
/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().  In order to do this,
 * indices are kept in LinuxAioState.  The function schedules BH completion so
 * it can be called again in a nested event loop.  When there are no events
 * left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    defer_call_begin();

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop.  If we are the last, all counters drop to zero. */
    s->event_max = 0;
    s->event_idx = 0;

    defer_call_end();
}
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);

    if (!QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    return io_getevents_peek(s->ctx, &events);
}

static void qemu_laio_poll_ready(EventNotifier *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    qemu_laio_process_completions_and_submit(s);
}
static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something just right away if there are
         * still requests in-flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still have pending requests (in_queue > 0).  We do not attempt
         * to repeat submission to avoid an I/O hang.  The reason is simple:
         * s->e is still set and the completion callback will be called
         * shortly, so all pending requests will be submitted from there.
         */
    }
}
static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
{
    uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;

    /*
     * The AIO context can be shared between multiple block devices, so
     * `dev_max_batch` allows reducing the batch size for latency-sensitive
     * devices.
     */
    max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);

    /* limit the batch with the number of available events */
    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);

    return max_batch;
}
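/*
 * Worked example (numbers follow directly from the code above): with no
 * per-context limit (aio_max_batch == 0) and no per-device limit
 * (dev_max_batch == 0) the batch size is DEFAULT_MAX_BATCH, i.e. 32.  If
 * 1000 requests are already in flight, only MAX_EVENTS - 1000 = 24 event
 * slots remain, so the result is capped at 24.
 */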
static void laio_deferred_fn(void *opaque)
{
    LinuxAioState *s = opaque;

    if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type, uint64_t dev_max_batch)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_ZONE_APPEND:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_prep_fdsync(iocbs, fd);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked) {
        if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
            ioq_submit(s);
        } else {
            defer_call(laio_deferred_fn, s);
        }
    }

    return 0;
}
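/*
 * Batching sketch (illustrative; relies on the defer_call() contract in
 * qemu/defer-call.h and uses hypothetical helpers pop_request() and
 * request_co_fn()): a device model brackets its request processing with a
 * deferred section, so the requests queued above are either flushed early
 * once laio_max_batch() is reached or submitted in one io_submit() when
 * the outermost defer_call_end() invokes laio_deferred_fn().
 *
 *     defer_call_begin();
 *     while ((req = pop_request(vq)) != NULL) {
 *         qemu_coroutine_enter(qemu_coroutine_create(request_co_fn, req));
 *     }
 *     defer_call_end();    // deferred laio_deferred_fn() submits the batch
 */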
int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
                                int type, uint64_t dev_max_batch)
{
    int ret;
    AioContext *ctx = qemu_get_current_aio_context();
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov ? qiov->size : 0,
        .ctx        = aio_get_linux_aio(ctx),
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}
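/*
 * Usage sketch (hypothetical caller, not taken from this file): a block
 * driver running in coroutine context with a Linux-AIO-capable file
 * descriptor could issue a read roughly like this; `fd`, `offset` and
 * `qiov` are assumed to come from the driver's own request state, and a
 * dev_max_batch of 0 means "no per-device limit".
 *
 *     int ret = laio_co_submit(fd, offset, qiov, QEMU_AIO_READ, 0);
 *     if (ret < 0) {
 *         return ret;    // submission error or failed I/O
 *     }
 */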
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, NULL, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb,
                           qemu_laio_poll_ready);
}
LinuxAioState *laio_init(Error **errp)
{
    int rc;
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    rc = event_notifier_init(&s->e, false);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to initialize event notifier");
        goto out_free_state;
    }

    rc = io_setup(MAX_EVENTS, &s->ctx);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to create linux AIO context");
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}
bool laio_has_fdsync(int fd)
{
    struct iocb cb;
    struct iocb *cbs[] = {&cb, NULL};

    io_context_t ctx = 0;
    io_setup(1, &ctx);

    /* check if the host kernel supports IO_CMD_FDSYNC */
    io_prep_fdsync(&cb, fd);
    int ret = io_submit(ctx, 1, cbs);

    io_destroy(ctx);
    return (ret == -EINVAL) ? false : true;
}