/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest. If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 128

#define MAX_QUEUED_IO 128

struct qemu_laiocb {
    BlockAIOCB common;
    struct qemu_laio_state *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    int plugged;
    unsigned int idx;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct qemu_laio_state {
    io_context_t ctx;
    EventNotifier e;

    /* io queue for submit at batch */
    LaioQueue io_q;

    /* I/O completion processing */
    QEMUBH *completion_bh;
    struct io_event events[MAX_EVENTS];
    int event_idx;
    int event_max;
};

static int ioq_submit(struct qemu_laio_state *s);
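
/* Fold the res/res2 words of a struct io_event back into a single signed
 * result: a byte count on success or a negative errno on failure. */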
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laio_state *s,
    struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                    laiocb->qiov->size - ret);
            } else {
                ret = -EINVAL;
            }
        }
    }
    laiocb->common.cb(laiocb->common.opaque, ret);

    qemu_aio_unref(laiocb);
}

/* The completion BH fetches completed I/O requests and invokes their
 * callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll(). In order to do this,
 * the completion events array and index are kept in qemu_laio_state. The BH
 * reschedules itself as long as there are completions pending so it will
 * either be called again in a nested event loop or will be called after all
 * events have been completed. When there are no events left to complete, the
 * BH returns without rescheduling.
 */
static void qemu_laio_completion_bh(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    /* Fetch more completion events when empty */
    if (s->event_idx == s->event_max) {
        do {
            struct timespec ts = { 0 };
            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
                                        s->events, &ts);
        } while (s->event_max == -EINTR);

        s->event_idx = 0;
        if (s->event_max <= 0) {
            s->event_max = 0;
            return; /* no more events */
        }
    }

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    /* Process completion events */
    while (s->event_idx < s->event_max) {
        struct iocb *iocb = s->events[s->event_idx].obj;
        struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
        s->event_idx++;

        qemu_laio_process_completion(s, laiocb);
    }

    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
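
/* EventNotifier callback: acknowledge the eventfd and defer completion
 * processing to the BH so it also works from nested event loops. */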
static void qemu_laio_completion_cb(EventNotifier *e)
{
    struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_bh_schedule(s->completion_bh);
    }
}
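
/* Asynchronous cancellation: try io_cancel() and mark the request
 * -ECANCELED.  If the kernel refuses, the normal completion path will run
 * the callback later; otherwise invoke it here. */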
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}

static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel_async       = laio_cancel,
};
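
/* Reset the submission queue to an empty, unplugged state. */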
static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->idx = 0;
}
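
/* Submit up to MAX_QUEUED_IO pending requests with a single io_submit()
 * call and drop the successfully submitted ones from the queue.  Returns
 * the number of requests accepted by the kernel. */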
static int ioq_submit(struct qemu_laio_state *s)
{
    int ret, i, len = 0;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_QUEUED_IO];

    QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
        iocbs[len++] = &aiocb->iocb;
        if (len == MAX_QUEUED_IO) {
            break;
        }
    }

    ret = io_submit(s->ctx, len, iocbs);
    if (ret == -EAGAIN) {
        /* Nothing could be submitted now; leave the requests queued */
        ret = 0;
    }

    for (i = 0; i < ret; i++) {
        s->io_q.idx--;
        QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
    }

    return ret;
}
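
/* Plugging postpones io_submit() so that several requests can be batched
 * into a single system call; unplugging flushes whatever has queued up. */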
void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
{
    struct qemu_laio_state *s = aio_ctx;

    s->io_q.plugged++;
}

int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
{
    struct qemu_laio_state *s = aio_ctx;
    int ret = 0;

    assert(s->io_q.plugged > 0 || !unplug);

    if (unplug && --s->io_q.plugged > 0) {
        return 0;
    }

    if (!QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ret = ioq_submit(s);
    }

    return ret;
}
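
/* Build an iocb for the read or write described by qiov, queue it and, once
 * enough requests are pending (immediately when unplugged, MAX_QUEUED_IO
 * entries when plugged), push the queue to the kernel. */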
BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laio_state *s = aio_ctx;
    struct qemu_laiocb *laiocb;
    struct iocb *iocbs;
    off_t offset = sector_num * 512;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * 512;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    iocbs = &laiocb->iocb;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        goto out_free_aiocb;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.idx++;
    if (s->io_q.idx == (s->io_q.plugged ? MAX_QUEUED_IO : 1)) {
        ioq_submit(s);
    }
    return &laiocb->common;

out_free_aiocb:
    qemu_aio_unref(laiocb);
    return NULL;
}
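
/* Called when the backing device migrates between AioContexts: the event
 * notifier and the completion BH must follow the new event loop. */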
void laio_detach_aio_context(void *s_, AioContext *old_context)
{
    struct qemu_laio_state *s = s_;

    aio_set_event_notifier(old_context, &s->e, NULL);
    qemu_bh_delete(s->completion_bh);
}

void laio_attach_aio_context(void *s_, AioContext *new_context)
{
    struct qemu_laio_state *s = s_;

    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
}
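
/* Allocate the per-device qemu_laio_state: an eventfd for completion
 * notification and a kernel AIO context sized for MAX_EVENTS requests.
 *
 * A rough usage sketch (the real call sites, in the raw POSIX block driver,
 * are not reproduced here):
 *
 *     void *aio = laio_init();
 *     laio_attach_aio_context(aio, bdrv_get_aio_context(bs));
 *     acb = laio_submit(bs, aio, fd, sector_num, qiov, nb_sectors,
 *                       cb, opaque, QEMU_AIO_READ);
 *     ...
 *     laio_detach_aio_context(aio, bdrv_get_aio_context(bs));
 *     laio_cleanup(aio);
 */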
void *laio_init(void)
{
    struct qemu_laio_state *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
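
/* Tear down what laio_init() set up.  A failed io_destroy() is only
 * reported, not treated as fatal. */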
void laio_cleanup(void *s_)
{
    struct qemu_laio_state *s = s_;

    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                        __func__, &s->ctx);
    }
    g_free(s);
}