/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"

#include <libaio.h>
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 128
struct qemu_laiocb {
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;
struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* io queue for submit at batch.  Protected by AioContext lock. */
    LaioQueue io_q;

    /* I/O completion processing.  Only runs in I/O thread.  */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}
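/*
 * Illustration: the kernel reports a request's primary result in res and
 * secondary status in res2 (normally zero).  With res2 == 0 the combined
 * value is simply res: the number of bytes transferred on success, or a
 * negative errno such as -EIO on failure.
 */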
/*
 * Completes an AIO request.
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;

    /*
     * If the coroutine is already entered it must be in ioq_submit() and
     * will notice laio->ret has been filled in when it eventually runs
     * later.  Coroutines cannot be entered recursively so avoid doing
     * that!
     */
    if (!qemu_coroutine_entered(laiocb->co)) {
        aio_co_wake(laiocb->co);
    }
}
/**
 * aio_ring buffer, which is shared between userspace and kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned id;               /* kernel internal index number */
    unsigned nr;               /* number of io_events */
    unsigned head;             /* Written to by userland or by kernel. */
    unsigned tail;

    unsigned magic;
    unsigned compat_features;
    unsigned incompat_features;
    unsigned header_length;    /* size of aio_ring */

    struct io_event io_events[0];
};
/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer on events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * on events array.  This function does not update the internal
 * ring buffer, only reads head and tail.  When @events has been
 * processed io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}
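/*
 * Worked example (illustrative, assuming a ring of nr == 128 entries):
 * with head == 120 and tail == 5 the completions wrap around, so the
 * first peek returns 128 - 120 == 8 contiguous events starting at
 * io_events[120].  After io_getevents_commit(ctx, 8) advances head to
 * (120 + 8) % 128 == 0, a second peek returns the remaining 5 events.
 */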
/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events on which head should be advanced
 *
 * Advances head of a ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}
/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer on events array, output value
 * @nr: the number of events on which head should be advanced
 *
 * Advances head of a ring buffer and returns number of elements left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}
/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().  In order to do this,
 * indices are kept in LinuxAioState.  The function schedules BH completion so
 * it can be called again in a nested event loop.  When there are no events
 * left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop.  If we are the last level, all counters have
     * dropped to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}
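/*
 * Sketch of the nested case handled above (illustrative only):
 *
 *   qemu_laio_process_completions()          <- outer call
 *     qemu_laio_process_completion()         <- request callback runs
 *       aio_poll()                           <- callback enters event loop
 *         qemu_laio_completion_bh()          <- rescheduled BH fires
 *           qemu_laio_process_completions()  <- nested call drains the rest
 *
 * The nested call picks up at the saved event_idx, consumes the remaining
 * events, and resets event_max and event_idx to zero, so the outer loop
 * terminates as soon as control returns to it.
 */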
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    aio_context_acquire(s->aio_context);
    qemu_laio_process_completions(s);

    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}
static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}
static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something just right away if there are
         * still requests in-flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still hold pending requests (in_queue > 0).  We do not attempt
         * to repeat submission to avoid an I/O hang.  The reason is simple:
         * s->e is still set and the completion callback will be called
         * shortly, and all pending requests will be submitted from there.
         */
    }
}
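/*
 * Illustrative partial-submit scenario: with 5 pending requests and
 * io_submit() accepting only 3 of them, ret == 3 < len == 5, so in_flight
 * grows by 3, in_queue shrinks by 3, and the 3 accepted iocbs are split
 * off the pending list.  The do/while terminates because ret != len, and
 * blocked is set because in_queue is still positive.  The 2 leftover
 * requests are submitted later from the completion path.
 */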
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
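/*
 * Plug/unplug batching (illustrative summary): while plugged is non-zero,
 * laio_do_submit() only queues requests instead of calling ioq_submit(),
 * unless in_flight + in_queue already reaches MAX_EVENTS.  When the last
 * unplug drops the counter to zero, everything queued meanwhile goes out
 * in one ioq_submit() pass.  The batched requests come from separate
 * coroutines; a single coroutine cannot batch its own sequential requests
 * because laio_co_submit() yields until each request completes.
 */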
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}
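/*
 * Hypothetical caller sketch (for illustration only; the real callers are
 * the raw POSIX block driver's coroutine read/write paths):
 *
 *   static int coroutine_fn raw_co_read_example(BlockDriverState *bs,
 *                                               LinuxAioState *aio, int fd,
 *                                               uint64_t offset,
 *                                               QEMUIOVector *qiov)
 *   {
 *       return laio_co_submit(bs, aio, fd, offset, qiov, QEMU_AIO_READ);
 *   }
 *
 * The call yields until the request completes and returns 0 on success or
 * a negative errno on failure.
 */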
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}
LinuxAioState *laio_init(Error **errp)
{
    int rc;
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    rc = event_notifier_init(&s->e, false);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to initialize event notifier");
        goto out_free_state;
    }

    rc = io_setup(MAX_EVENTS, &s->ctx);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to create linux AIO context");
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
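/*
 * Lifecycle sketch (illustrative): a LinuxAioState is created once with
 * laio_init(), bound to an event loop with laio_attach_aio_context(),
 * optionally rebound via laio_detach_aio_context() plus another attach,
 * and finally torn down with laio_cleanup():
 *
 *   Error *local_err = NULL;
 *   LinuxAioState *aio = laio_init(&local_err);
 *   if (!aio) {
 *       ... report local_err ...
 *   }
 *   laio_attach_aio_context(aio, ctx);
 *   ... submit I/O via laio_co_submit() ...
 *   laio_detach_aio_context(aio, ctx);
 *   laio_cleanup(aio);
 */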
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                        __func__, &s->ctx);
    }
    g_free(s);
}