/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "qemu-aio.h"
#include "block_int.h"
#include "block/raw-posix-aio.h"

#include <sys/eventfd.h>
#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 128
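
/*
 * State of one request: the generic ACB seen by the block layer, the kernel
 * iocb submitted for it, and the bookkeeping needed to run the completion
 * callback in the right AsyncContext later.
 */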
struct qemu_laiocb {
    BlockDriverAIOCB common;
    struct qemu_laio_state *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    int async_context_id;
    QLIST_ENTRY(qemu_laiocb) node;
};

struct qemu_laio_state {
    io_context_t ctx;
    int efd;
    int count;
    QLIST_HEAD(, qemu_laiocb) completed_reqs;
};
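
/*
 * Folds the two result fields of an io_event into a single ssize_t status:
 * the number of bytes transferred on success, or a negative errno value on
 * failure.
 */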
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request (calls the callback and frees the ACB).
 *
 * Be sure to be in the right AsyncContext before calling this function.
 */
static void qemu_laio_process_completion(struct qemu_laio_state *s,
    struct qemu_laiocb *laiocb)
{
    int ret;

    s->count--;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes)
            ret = 0;
        else if (ret >= 0)
            /* short transfers are treated as errors */
            ret = -EINVAL;

        laiocb->common.cb(laiocb->common.opaque, ret);
    }

    qemu_aio_release(laiocb);
}

/*
 * Processes all queued AIO requests, i.e. requests that have returned from the
 * OS but whose callback was not called yet. Requests that cannot have their
 * callback called in the current AsyncContext remain in the queue.
 *
 * Returns 1 if at least one request could be completed, 0 otherwise.
 */
static int qemu_laio_process_requests(void *opaque)
{
    struct qemu_laio_state *s = opaque;
    struct qemu_laiocb *laiocb, *next;
    int res = 0;

    QLIST_FOREACH_SAFE (laiocb, &s->completed_reqs, node, next) {
        if (laiocb->async_context_id == get_async_context_id()) {
            qemu_laio_process_completion(s, laiocb);
            QLIST_REMOVE(laiocb, node);
            res = 1;
        }
    }

    return res;
}

/*
 * Puts a request in the completion queue so that its callback is called the
 * next time it is possible. If we are already in the right AsyncContext,
 * the request is completed immediately instead.
 */
static void qemu_laio_enqueue_completed(struct qemu_laio_state *s,
    struct qemu_laiocb *laiocb)
{
    if (laiocb->async_context_id == get_async_context_id()) {
        qemu_laio_process_completion(s, laiocb);
    } else {
        QLIST_INSERT_HEAD(&s->completed_reqs, laiocb, node);
    }
}
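
/*
 * Called from the main loop when the eventfd becomes readable.  The 8-byte
 * counter read from the eventfd is the number of completions the kernel has
 * signalled since the last read, so it is used as the minimum number of
 * events to fetch with io_getevents().  Each completed request is then
 * completed directly or queued via qemu_laio_enqueue_completed().
 */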
static void qemu_laio_completion_cb(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    while (1) {
        struct io_event events[MAX_EVENTS];
        uint64_t val;
        ssize_t ret;
        struct timespec ts = { 0 };
        int nevents, i;

        do {
            ret = read(s->efd, &val, sizeof(val));
        } while (ret == -1 && errno == EINTR);

        if (ret == -1 && errno == EAGAIN)
            break;

        /* eventfd reads always transfer exactly 8 bytes */
        if (ret != 8)
            break;

        do {
            nevents = io_getevents(s->ctx, val, MAX_EVENTS, events, &ts);
        } while (nevents == -EINTR);

        for (i = 0; i < nevents; i++) {
            struct iocb *iocb = events[i].obj;
            struct qemu_laiocb *laiocb =
                    container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[i]);
            qemu_laio_enqueue_completed(s, laiocb);
        }
    }
}
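
/*
 * io_flush handler registered with qemu_aio_set_fd_handler(): returns nonzero
 * as long as requests are still in flight, so that qemu_aio_flush() keeps
 * waiting on the eventfd.
 */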
static int qemu_laio_flush_cb(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    return (s->count > 0) ? 1 : 0;
}
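
/*
 * Tries to cancel an in-flight request.  If io_cancel() succeeds, the request
 * is marked -ECANCELED and its callback will not be invoked; otherwise we
 * synchronously poll the io context until the request has completed on its
 * own.
 */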
static void laio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS)
        return;

    /*
     * Note that as of Linux 2.6.31 neither the block device code nor any
     * filesystem implements cancellation of AIO requests.
     * Thus the polling loop below is the normal code path.
     */
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    if (ret == 0) {
        laiocb->ret = -ECANCELED;
        return;
    }

    /*
     * We have to wait for the iocb to finish.
     *
     * The only way to get the iocb status update is by polling the io context.
     * We might be able to do this slightly more optimally by removing the
     * O_NONBLOCK flag.
     */
    while (laiocb->ret == -EINPROGRESS)
        qemu_laio_completion_cb(laiocb->ctx);
}

static AIOPool laio_pool = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel             = laio_cancel,
};
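
/*
 * Submits a single read or write request for the given file descriptor:
 * prepares a preadv/pwritev iocb, binds it to the state's eventfd for
 * completion notification and hands it to the kernel with io_submit().
 * Returns the ACB, or NULL on failure (in which case the ACB is released
 * again).
 *
 * Illustrative call from a block driver (the variables s->aio_ctx and s->fd
 * are assumptions about the caller, not part of this file):
 *
 *     acb = laio_submit(bs, s->aio_ctx, s->fd, sector_num, qiov, nb_sectors,
 *                       cb, opaque, QEMU_AIO_READ);
 *     if (!acb)
 *         return NULL;
 */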
BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laio_state *s = aio_ctx;
    struct qemu_laiocb *laiocb;
    struct iocb *iocbs;
    off_t offset = sector_num * 512;

    laiocb = qemu_aio_get(&laio_pool, bs, cb, opaque);
    if (!laiocb)
        return NULL;
    laiocb->nbytes = nb_sectors * 512;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->async_context_id = get_async_context_id();

    iocbs = &laiocb->iocb;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        goto out_free_aiocb;
    }
    io_set_eventfd(&laiocb->iocb, s->efd);
    s->count++;

    if (io_submit(s->ctx, 1, &iocbs) < 0)
        goto out_dec_count;
    return &laiocb->common;

out_dec_count:
    s->count--;
out_free_aiocb:
    qemu_aio_release(laiocb);
    return NULL;
}
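
/*
 * Allocates the per-device AIO state: an empty completion queue, a
 * non-blocking eventfd used for completion notification and a kernel AIO
 * context sized for MAX_EVENTS in-flight requests.  The eventfd is then
 * registered with the main loop.  Returns the opaque state pointer that is
 * passed back as the aio_ctx argument of laio_submit(), or NULL on error.
 */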
void *laio_init(void)
{
    struct qemu_laio_state *s;

    s = qemu_mallocz(sizeof(*s));
    QLIST_INIT(&s->completed_reqs);
    s->efd = eventfd(0, 0);
    if (s->efd == -1)
        goto out_free_state;
    fcntl(s->efd, F_SETFL, O_NONBLOCK);

    if (io_setup(MAX_EVENTS, &s->ctx) != 0)
        goto out_close_efd;

    qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
        qemu_laio_flush_cb, qemu_laio_process_requests, s);

    return s;

out_close_efd:
    close(s->efd);
out_free_state:
    qemu_free(s);
    return NULL;
}