/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"

#include <libaio.h>
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 128

#define MAX_QUEUED_IO 128
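
/*
 * Each in-flight request is tracked by a qemu_laiocb; batched submissions are
 * collected in a LaioQueue, and the per-device qemu_laio_state ties the kernel
 * io context to an eventfd-backed EventNotifier for completion signalling.
 */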
struct qemu_laiocb {
    BlockDriverAIOCB common;
    struct qemu_laio_state *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QLIST_ENTRY(qemu_laiocb) node;
};

typedef struct {
    struct iocb *iocbs[MAX_QUEUED_IO];
    int plugged;
    unsigned int size;
    unsigned int idx;
} LaioQueue;

struct qemu_laio_state {
    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission */
    LaioQueue io_q;
};
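
/*
 * res and res2 of the io_event are treated as the low and high 32 bits of a
 * single completion value: a byte count on success or a negative errno.
 */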
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}
/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laio_state *s,
    struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -EINVAL;
            }
        }

        laiocb->common.cb(laiocb->common.opaque, ret);
    }

    qemu_aio_release(laiocb);
}
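
/*
 * Runs in the AioContext event loop whenever the completion eventfd fires:
 * drain all available events without blocking (zero timeout) and complete
 * the corresponding requests.
 */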
static void qemu_laio_completion_cb(EventNotifier *e)
{
    struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);

    while (event_notifier_test_and_clear(&s->e)) {
        struct io_event events[MAX_EVENTS];
        struct timespec ts = { 0 };
        int nevents, i;

        do {
            nevents = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS, events, &ts);
        } while (nevents == -EINTR);

        for (i = 0; i < nevents; i++) {
            struct iocb *iocb = events[i].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[i]);
            qemu_laio_process_completion(s, laiocb);
        }
    }
}
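
/*
 * .cancel callback: ask the kernel to cancel the iocb; if that fails (the
 * common case, see below), poll the io context until the request completes
 * on its own.
 */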
static void laio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }

    /*
     * Note that as of Linux 2.6.31 neither the block device code nor any
     * filesystem implements cancellation of AIO requests.
     * Thus the polling loop below is the normal code path.
     */
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    if (ret == 0) {
        laiocb->ret = -ECANCELED;
        return;
    }

    /*
     * We have to wait for the iocb to finish.
     *
     * The only way to get the iocb status update is by polling the io context.
     * We might be able to do this slightly more optimally by removing the
     * O_NONBLOCK flag.
     */
    while (laiocb->ret == -EINPROGRESS) {
        qemu_laio_completion_cb(&laiocb->ctx->e);
    }
}
static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel             = laio_cancel,
};
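
/*
 * Request batching: while a device is plugged, iocbs are collected in io_q
 * and flushed with a single io_submit() call when the queue fills up or the
 * device is unplugged.
 */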
static void ioq_init(LaioQueue *io_q)
{
    io_q->size = MAX_QUEUED_IO;
    io_q->idx = 0;
    io_q->plugged = 0;
}
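
/*
 * Flush the queued iocbs in one io_submit() call, retrying a few times on
 * -EAGAIN; requests the kernel did not accept are failed here so their
 * callbacks still run.
 */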
static int ioq_submit(struct qemu_laio_state *s)
{
    int ret, i = 0;
    int len = s->io_q.idx;

    do {
        ret = io_submit(s->ctx, len, s->io_q.iocbs);
    } while (i++ < 3 && ret == -EAGAIN);

    /* empty the I/O queue */
    s->io_q.idx = 0;

    if (ret < 0) {
        i = 0;
    } else {
        i = ret;
    }

    for (; i < len; i++) {
        struct qemu_laiocb *laiocb =
            container_of(s->io_q.iocbs[i], struct qemu_laiocb, iocb);

        laiocb->ret = (ret < 0) ? ret : -EIO;
        qemu_laio_process_completion(s, laiocb);
    }

    return ret;
}
static void ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
{
    unsigned int idx = s->io_q.idx;

    s->io_q.iocbs[idx++] = iocb;
    s->io_q.idx = idx;

    /* submit immediately if queue is full */
    if (idx == s->io_q.size) {
        ioq_submit(s);
    }
}
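
/*
 * Plugs nest: the queue is only flushed when the last plug is released, or
 * when a flush is forced by calling laio_io_unplug() with unplug == false.
 */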
void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
{
    struct qemu_laio_state *s = aio_ctx;

    s->io_q.plugged++;
}
int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
{
    struct qemu_laio_state *s = aio_ctx;
    int ret = 0;

    assert(s->io_q.plugged > 0 || !unplug);

    if (unplug && --s->io_q.plugged > 0) {
        return 0;
    }

    if (s->io_q.idx > 0) {
        ret = ioq_submit(s);
    }

    return ret;
}
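
/*
 * Build a preadv/pwritev iocb for the request, tie its completion to the
 * eventfd, and either submit it immediately or queue it while plugged.
 */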
BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laio_state *s = aio_ctx;
    struct qemu_laiocb *laiocb;
    struct iocb *iocbs;
    off_t offset = sector_num * 512;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * 512;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    iocbs = &laiocb->iocb;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        goto out_free_aiocb;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    if (!s->io_q.plugged) {
        if (io_submit(s->ctx, 1, &iocbs) < 0) {
            goto out_free_aiocb;
        }
    } else {
        ioq_enqueue(s, iocbs);
    }
    return &laiocb->common;

out_free_aiocb:
    qemu_aio_release(laiocb);
    return NULL;
}
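
/*
 * Attach/detach the completion EventNotifier to an AioContext so completions
 * are handled by that context's event loop.
 */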
void laio_detach_aio_context(void *s_, AioContext *old_context)
{
    struct qemu_laio_state *s = s_;

    aio_set_event_notifier(old_context, &s->e, NULL);
}
void laio_attach_aio_context(void *s_, AioContext *new_context)
{
    struct qemu_laio_state *s = s_;

    aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
}
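
/*
 * Allocate per-device state: an eventfd for completion signalling and a
 * kernel io context sized for MAX_EVENTS outstanding requests.
 */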
void *laio_init(void)
{
    struct qemu_laio_state *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
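
/* Tear down the eventfd and the kernel io context. */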
void laio_cleanup(void *s_)
{
    struct qemu_laio_state *s = s_;

    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}