/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest. If we get more outstanding requests at a time
 * than this we will get EAGAIN from io_submit which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128
struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};
typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;
struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission */
    LaioQueue io_q;

    /* I/O completion processing */
    QEMUBH *completion_bh;
    struct io_event events[MAX_EVENTS];
    int event_idx;
    int event_max;
};
static void ioq_submit(LinuxAioState *s);
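
/* Merge the res2 (high 32 bits) and res (low 32 bits) fields of a completion
 * event into the request's signed 64-bit result.
 */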
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}
/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                    laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        qemu_coroutine_enter(laiocb->co);
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}
/* The completion BH fetches completed I/O requests and invokes their
 * callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll(). In order to do this,
 * the completion events array and index are kept in LinuxAioState. The BH
 * reschedules itself as long as there are completions pending so it will
 * either be called again in a nested event loop or will be called after all
 * events have been completed. When there are no events left to complete, the
 * BH returns without rescheduling.
 */
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    /* Fetch more completion events when empty */
    if (s->event_idx == s->event_max) {
        do {
            struct timespec ts = { 0 };
            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
                                        s->events, &ts);
        } while (s->event_max == -EINTR);

        s->event_idx = 0;
        if (s->event_max <= 0) {
            s->event_max = 0;
            return; /* no more events */
        }
        s->io_q.in_flight -= s->event_max;
    }

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    /* Process completion events */
    while (s->event_idx < s->event_max) {
        struct iocb *iocb = s->events[s->event_idx].obj;
        struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
        s->event_idx++;

        qemu_laio_process_completion(laiocb);
    }

    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }

    /* All fetched events have been processed; the reschedule done above is
     * no longer needed if it has not run yet.
     */
    qemu_bh_cancel(s->completion_bh);
}
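
/* Event notifier callback: invoked by the event loop when the completion
 * eventfd becomes readable.  Clears the notifier and processes any completed
 * requests.
 */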
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_completion_bh(s);
    }
}
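
/* Cancel a request.  If the kernel cannot cancel the iocb, completion is left
 * to the event loop; otherwise the callback is invoked immediately with
 * -ECANCELED.
 */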
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}
static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel_async       = laio_cancel,
};
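
/* Reset the submission queue to its empty initial state. */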
static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
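
/* Pass pending requests to the kernel in batches of up to MAX_EVENTS,
 * stopping when the kernel refuses to accept more (EAGAIN) or when MAX_EVENTS
 * requests are already in flight.  Anything left in the pending queue marks
 * the queue blocked until completions free up slots.
 */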
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            abort();
        }

        /* io_submit() consumed the first 'ret' iocbs; drop them from the
         * pending queue.
         */
        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);
}
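
/* Plug/unplug let callers batch submissions: while at least one plug is
 * outstanding, new requests are only queued, and are submitted once the
 * batch reaches MAX_EVENTS or the last plug is removed.
 */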
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}
void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
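
/* Prepare the iocb for one request, enqueue it, and kick off submission
 * unless the queue is blocked or a plug is holding requests back.
 */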
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}
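
/* Coroutine interface: submit one request and yield until
 * qemu_laio_process_completion() re-enters the coroutine with the result
 * stored in laiocb.ret.
 */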
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    qemu_coroutine_yield();
    return laiocb.ret;
}
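
/* Callback interface: allocate an ACB, submit the request, and return the
 * ACB so the caller can cancel it.  The callback runs from the completion BH
 * when the request finishes.
 */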
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}
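
/* Detach from an AioContext before switching contexts or shutting down; the
 * event notifier handler and completion BH must not fire afterwards.
 */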
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL);
    qemu_bh_delete(s->completion_bh);
}
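
/* Attach to a new AioContext, recreating the completion BH and registering
 * the eventfd with the context's event loop.
 */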
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb);
}
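
/* Allocate and initialize a LinuxAioState: an eventfd for completion
 * notification plus a kernel AIO context sized for MAX_EVENTS requests.
 * Returns NULL on failure.
 */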
LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
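
/* Tear down a LinuxAioState once it has been detached from its AioContext
 * (detaching is what deletes the completion BH).
 */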
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                        __func__, &s->ctx);
    }
    g_free(s);
}