/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "block/raw-posix-aio.h"

#include <sys/eventfd.h>
#include <libaio.h>
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest. If we get more outstanding requests at a time
 * than this we will get EAGAIN from io_submit which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128
29 BlockDriverAIOCB common
;
30 struct qemu_laio_state
*ctx
;
34 QLIST_ENTRY(qemu_laiocb
) node
;
37 struct qemu_laio_state
{
43 static inline ssize_t
io_event_ret(struct io_event
*ev
)
45 return (ssize_t
)(((uint64_t)ev
->res2
<< 32) | ev
->res
);
49 * Completes an AIO request (calls the callback and frees the ACB).
51 static void qemu_laio_process_completion(struct qemu_laio_state
*s
,
52 struct qemu_laiocb
*laiocb
)
59 if (ret
!= -ECANCELED
) {
60 if (ret
== laiocb
->nbytes
)
65 laiocb
->common
.cb(laiocb
->common
.opaque
, ret
);
68 qemu_aio_release(laiocb
);
72 * All requests are directly processed when they complete, so there's nothing
73 * left to do during qemu_aio_wait().
75 static int qemu_laio_process_requests(void *opaque
)
80 static void qemu_laio_completion_cb(void *opaque
)
82 struct qemu_laio_state
*s
= opaque
;
85 struct io_event events
[MAX_EVENTS
];
88 struct timespec ts
= { 0 };
92 ret
= read(s
->efd
, &val
, sizeof(val
));
93 } while (ret
== -1 && errno
== EINTR
);
95 if (ret
== -1 && errno
== EAGAIN
)
102 nevents
= io_getevents(s
->ctx
, val
, MAX_EVENTS
, events
, &ts
);
103 } while (nevents
== -EINTR
);
105 for (i
= 0; i
< nevents
; i
++) {
106 struct iocb
*iocb
= events
[i
].obj
;
107 struct qemu_laiocb
*laiocb
=
108 container_of(iocb
, struct qemu_laiocb
, iocb
);
110 laiocb
->ret
= io_event_ret(&events
[i
]);
111 qemu_laio_process_completion(s
, laiocb
);
116 static int qemu_laio_flush_cb(void *opaque
)
118 struct qemu_laio_state
*s
= opaque
;
120 return (s
->count
> 0) ? 1 : 0;
123 static void laio_cancel(BlockDriverAIOCB
*blockacb
)
125 struct qemu_laiocb
*laiocb
= (struct qemu_laiocb
*)blockacb
;
126 struct io_event event
;
129 if (laiocb
->ret
!= -EINPROGRESS
)
133 * Note that as of Linux 2.6.31 neither the block device code nor any
134 * filesystem implements cancellation of AIO request.
135 * Thus the polling loop below is the normal code path.
137 ret
= io_cancel(laiocb
->ctx
->ctx
, &laiocb
->iocb
, &event
);
139 laiocb
->ret
= -ECANCELED
;
144 * We have to wait for the iocb to finish.
146 * The only way to get the iocb status update is by polling the io context.
147 * We might be able to do this slightly more optimal by removing the
150 while (laiocb
->ret
== -EINPROGRESS
)
151 qemu_laio_completion_cb(laiocb
->ctx
);
154 static AIOPool laio_pool
= {
155 .aiocb_size
= sizeof(struct qemu_laiocb
),
156 .cancel
= laio_cancel
,
159 BlockDriverAIOCB
*laio_submit(BlockDriverState
*bs
, void *aio_ctx
, int fd
,
160 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
161 BlockDriverCompletionFunc
*cb
, void *opaque
, int type
)
163 struct qemu_laio_state
*s
= aio_ctx
;
164 struct qemu_laiocb
*laiocb
;
166 off_t offset
= sector_num
* 512;
168 laiocb
= qemu_aio_get(&laio_pool
, bs
, cb
, opaque
);
171 laiocb
->nbytes
= nb_sectors
* 512;
173 laiocb
->ret
= -EINPROGRESS
;
175 iocbs
= &laiocb
->iocb
;
179 io_prep_pwritev(iocbs
, fd
, qiov
->iov
, qiov
->niov
, offset
);
182 io_prep_preadv(iocbs
, fd
, qiov
->iov
, qiov
->niov
, offset
);
185 fprintf(stderr
, "%s: invalid AIO request type 0x%x.\n",
189 io_set_eventfd(&laiocb
->iocb
, s
->efd
);
192 if (io_submit(s
->ctx
, 1, &iocbs
) < 0)
194 return &laiocb
->common
;
197 qemu_aio_release(laiocb
);
203 void *laio_init(void)
205 struct qemu_laio_state
*s
;
207 s
= qemu_mallocz(sizeof(*s
));
208 s
->efd
= eventfd(0, 0);
211 fcntl(s
->efd
, F_SETFL
, O_NONBLOCK
);
213 if (io_setup(MAX_EVENTS
, &s
->ctx
) != 0)
216 qemu_aio_set_fd_handler(s
->efd
, qemu_laio_completion_cb
, NULL
,
217 qemu_laio_flush_cb
, qemu_laio_process_requests
, s
);