/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest.  If we get more outstanding requests at a time
 * than this we will get EAGAIN from io_submit which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128
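/*
 * Note (illustrative summary of the code below): MAX_EVENTS bounds both the
 * io_setup() ring size in laio_init() and the batching in ioq_submit(), which
 * caps each io_submit() call at MAX_EVENTS iocbs and stops queueing more once
 * in_flight plus the current batch reaches the limit.
 */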
struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;
struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batch submission */
    LaioQueue io_q;

    /* I/O completion processing */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};
static void ioq_submit(LinuxAioState *s);
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}
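/*
 * Illustrative example: a fully successful 4 KiB read completes with
 * ev->res == 4096 and ev->res2 == 0, so io_event_ret() returns 4096;
 * failed requests carry a negative errno in ev->res, which is passed
 * through unchanged.
 */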
/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            laiocb->ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                laiocb->ret = -ENOSPC;
            }
        }
    }
    if (laiocb->co) {
        /* Jump and continue completion for foreign requests, don't do
         * anything for the current request; it will be completed shortly. */
        if (laiocb->co != qemu_coroutine_self()) {
            qemu_coroutine_enter(laiocb->co);
        }
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}
/*
 * aio_ring buffer, which is shared between userspace and the kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned    id;     /* kernel internal index number */
    unsigned    nr;     /* number of io_events */
    unsigned    head;   /* Written to by userland or by kernel. */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */

    struct io_event io_events[0];
};
/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array (output value)
 *
 * Returns the number of completed events and sets a pointer
 * to the events array.  This function does not update the internal
 * ring buffer; it only reads head and tail.  When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}
/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events on which head should be advanced
 *
 * Advances the head of a ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}
/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array (output value)
 * @nr: the number of events on which head should be advanced
 *
 * Advances the head of a ring buffer and returns the number of elements left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}
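/*
 * Illustrative example of the wrap-around handling above: with ring->nr == 128,
 * head == 126 and tail == 2, four events are pending.  io_getevents_peek()
 * first reports the two events at indices 126..127; after
 * io_getevents_commit(ctx, 2) the head wraps to 0 and the next peek reports
 * the remaining two events at indices 0..1.
 */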
/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().  In order to do this,
 * indices are kept in LinuxAioState.  The function schedules the completion BH
 * so it can be called again in a nested event loop.  When there are no events
 * left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);
    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);
            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of its
     * own `for` loop.  If we are the last level, all counters drop to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}
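/*
 * Illustrative nesting scenario: a request callback invoked from
 * qemu_laio_process_completion() calls aio_poll(), which runs the scheduled
 * completion BH and re-enters qemu_laio_process_completions().  The inner
 * invocation consumes the remaining events and finally sets event_max to 0,
 * so the outer `for` loop terminates as soon as control returns to it.
 */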
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);
    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}
static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel_async       = laio_cancel,
};
static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue can
         * still have pending requests (in_queue > 0).  We do not attempt to
         * repeat submission in order to avoid an IO hang.  The reason is
         * simple: s->e is still set and the completion callback will be called
         * shortly, and all pending requests will be submitted from there.
         */
    }
}
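/*
 * Illustrative behaviour of the loop above: if io_submit() accepts only
 * ret < len iocbs, the do/while exits (ret != len), the first ret requests
 * are unlinked from io_q.pending via QSIMPLEQ_SPLIT_AFTER(), and io_q.blocked
 * is set because in_queue is still non-zero; further submission is then
 * deferred until the completion path calls ioq_submit() again.
 */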
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}
void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
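/*
 * Usage sketch (illustrative only; the loop variables below are placeholders,
 * not code from this file): bracketing a burst of requests with
 * laio_io_plug()/laio_io_unplug() lets the iocbs accumulate in io_q.pending
 * so they reach the kernel in as few io_submit() calls as possible:
 *
 *     laio_io_plug(bs, s);
 *     for (i = 0; i < n; i++) {
 *         laio_submit(bs, s, fd, sector_num[i], &qiov[i], nb_sectors[i],
 *                     cb, opaque, QEMU_AIO_READ);
 *     }
 *     laio_io_unplug(bs, s);
 *
 * The final unplug call flushes io_q.pending via ioq_submit() once the plug
 * counter drops back to zero (unless the queue is blocked).
 */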
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL);
    qemu_bh_delete(s->completion_bh);
}
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb);
}
LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}