/*
 * Linux io_uring support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2019 Aarushi Mehta
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <liburing.h>
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"
#include "trace.h"

/* Only used for assertions. */
#include "qemu/coroutine_int.h"

/* io_uring ring size */
#define MAX_ENTRIES 128
typedef struct LuringAIOCB {
    Coroutine *co;
    struct io_uring_sqe sqeq;
    ssize_t ret;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(LuringAIOCB) next;

    /*
     * Buffered reads may require resubmission, see
     * luring_resubmit_short_read().
     */
    int total_read;
    QEMUIOVector resubmit_qiov;
} LuringAIOCB;
typedef struct LuringQueue {
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;
typedef struct LuringState {
    AioContext *aio_context;

    struct io_uring ring;

    /* No locking required, only accessed from AioContext home thread */
    LuringQueue io_q;

    /* I/O completion processing. Only runs in I/O thread. */
    QEMUBH *completion_bh;
} LuringState;
/**
 * luring_resubmit:
 *
 * Resubmit a request by appending it to submit_queue. The caller must ensure
 * that ioq_submit() is called later so that submit_queue requests are started.
 */
static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb)
{
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
}
/**
 * luring_resubmit_short_read:
 *
 * Short reads are rare but may occur. The remaining read request needs to be
 * resubmitted.
 */
static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
                                       int nread)
{
    QEMUIOVector *resubmit_qiov;
    size_t remaining;

    trace_luring_resubmit_short_read(s, luringcb, nread);

    /* Update read position */
    luringcb->total_read += nread;
    remaining = luringcb->qiov->size - luringcb->total_read;

    /* Shorten qiov */
    resubmit_qiov = &luringcb->resubmit_qiov;
    if (resubmit_qiov->iov == NULL) {
        qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov);
    } else {
        qemu_iovec_reset(resubmit_qiov);
    }
    qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read,
                      remaining);

    /* Update sqe */
    luringcb->sqeq.off += nread;
    luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
    luringcb->sqeq.len = luringcb->resubmit_qiov.niov;

    luring_resubmit(s, luringcb);
}
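
/*
 * Worked example (illustrative note, not from the original source): if a
 * 64 KiB readv completes with nread = 4096, total_read becomes 4096 and
 * remaining becomes 61440. resubmit_qiov is rebuilt to cover qiov bytes
 * [4096, 65536), the sqe's file offset advances by 4096, and its iovec
 * pointer/length are switched to resubmit_qiov, so the resubmitted request
 * picks up exactly where the short read stopped.
 */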
/**
 * luring_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests, consumes cqes and invokes their callbacks.
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().
 *
 * Function schedules BH completion so it can be called again in a nested
 * event loop. When there are no events left to complete the BH is being
 * canceled.
 */
static void luring_process_completions(LuringState *s)
{
    struct io_uring_cqe *cqes;
    int total_bytes;

    /*
     * Request completion callbacks can run the nested event loop.
     * Schedule ourselves so the nested event loop will "see" remaining
     * completed requests and process them. Without this, completion
     * callbacks that wait for other requests using a nested event loop
     * would hang forever.
     *
     * This workaround is needed because io_uring uses poll_wait, which
     * is woken up when new events are added to the uring, thus polling on
     * the same uring fd will block unless more events are received.
     *
     * Other leaf block drivers (drivers that access the data themselves)
     * are networking based, so they poll sockets for data and run the
     * correct coroutine.
     */
    qemu_bh_schedule(s->completion_bh);

    while (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
        LuringAIOCB *luringcb;
        int ret;

        if (!cqes) {
            break;
        }

        luringcb = io_uring_cqe_get_data(cqes);
        ret = cqes->res;
        io_uring_cqe_seen(&s->ring, cqes);
        cqes = NULL;

        /* Change counters one-by-one because we can be nested. */
        s->io_q.in_flight--;
        trace_luring_process_completion(s, luringcb, ret);

        /* total_read is non-zero only for resubmitted read requests */
        total_bytes = ret + luringcb->total_read;

        if (ret < 0) {
            /*
             * Only writev/readv/fsync requests on regular files or host block
             * devices are submitted. Therefore -EAGAIN is not expected but
             * it's known to happen sometimes with Linux SCSI. Submit again
             * and hope the request completes successfully.
             *
             * For more information, see:
             * https://lore.kernel.org/io-uring/20210727165811.284510-3-axboe@kernel.dk/T/#u
             *
             * If the code is changed to submit other types of requests in
             * the future, then this workaround may need to be extended to
             * deal with genuine -EAGAIN results that should not be
             * resubmitted immediately.
             */
            if (ret == -EINTR || ret == -EAGAIN) {
                luring_resubmit(s, luringcb);
                continue;
            }
        } else if (!luringcb->qiov) {
            goto end;
        } else if (total_bytes == luringcb->qiov->size) {
            ret = 0;
            /* Only read/write */
        } else {
            /* Short Read/Write */
            if (luringcb->is_read) {
                if (ret > 0) {
                    luring_resubmit_short_read(s, luringcb, ret);
                    continue;
                } else {
                    /* Pad with zeroes */
                    qemu_iovec_memset(luringcb->qiov, total_bytes, 0,
                                      luringcb->qiov->size - total_bytes);
                    ret = 0;
                }
            } else {
                ret = -ENOSPC;
            }
        }
end:
        luringcb->ret = ret;
        qemu_iovec_destroy(&luringcb->resubmit_qiov);

        /*
         * If the coroutine is already entered it must be in ioq_submit()
         * and will notice luringcb->ret has been filled in when it
         * eventually runs later. Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        assert(luringcb->co->ctx == s->aio_context);
        if (!qemu_coroutine_entered(luringcb->co)) {
            aio_co_wake(luringcb->co);
        }
    }

    qemu_bh_cancel(s->completion_bh);
}
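
/*
 * Summary of the BH dance above (descriptive note, not in the original
 * file): the BH is scheduled *before* draining the ring so that a nested
 * aio_poll() invoked from a completion callback re-enters this function
 * and sees any cqes the outer call has not consumed yet; once the loop
 * empties the ring, the now-unneeded BH is canceled again.
 */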
static int ioq_submit(LuringState *s)
{
    int ret = 0;
    LuringAIOCB *luringcb, *luringcb_next;

    while (s->io_q.in_queue > 0) {
        /*
         * Try to fetch sqes from the ring for requests waiting in
         * the overflow queue
         */
        QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next,
                              luringcb_next) {
            struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring);
            if (!sqes) {
                break;
            }
            /* Prep sqe for submission */
            *sqes = luringcb->sqeq;
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next);
        }
        ret = io_uring_submit(&s->ring);
        trace_luring_io_uring_submit(s, ret);
        /* Prevent infinite loop if submission is refused */
        if (ret <= 0) {
            if (ret == -EAGAIN || ret == -EINTR) {
                continue;
            }
            break;
        }
        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
    }
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * We can try to complete something just right away if there are
         * still requests in-flight.
         */
        luring_process_completions(s);
    }
    return ret;
}
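
/*
 * Accounting note (descriptive, not in the original): requests move from
 * io_q.in_queue to io_q.in_flight here as io_uring_submit() accepts them,
 * and in_flight drops one-by-one in luring_process_completions().
 * io_q.blocked means sqes were left over because the ring was full; while
 * it is set, luring_do_submit() only queues new requests and submission
 * resumes on the next ioq_submit() call.
 */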
static void luring_process_completions_and_submit(LuringState *s)
{
    luring_process_completions(s);

    if (s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}
static void qemu_luring_completion_bh(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

static void qemu_luring_completion_cb(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

static bool qemu_luring_poll_cb(void *opaque)
{
    LuringState *s = opaque;

    return io_uring_cq_ready(&s->ring);
}

static void qemu_luring_poll_ready(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}
static void ioq_init(LuringQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->submit_queue);
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
static void luring_unplug_fn(void *opaque)
{
    LuringState *s = opaque;
    trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
                           s->io_q.in_flight);
    if (!s->io_q.blocked && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}
/**
 * luring_do_submit:
 * @fd: file descriptor for I/O
 * @luringcb: AIO control block
 * @s: AIO state
 * @offset: offset for request
 * @type: type of request
 *
 * Fetches sqes from ring, adds to pending queue and preps them
 */
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
                            uint64_t offset, int type)
{
    int ret;
    struct io_uring_sqe *sqes = &luringcb->sqeq;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_ZONE_APPEND:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_uring_prep_readv(sqes, fd, luringcb->qiov->iov,
                            luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n",
                __func__, type);
        abort();
    }
    io_uring_sqe_set_data(sqes, luringcb);

    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.in_queue,
                           s->io_q.in_flight);
    if (!s->io_q.blocked) {
        if (s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES) {
            ret = ioq_submit(s);
            trace_luring_do_submit_done(s, ret);
            return ret;
        }

        blk_io_plug_call(luring_unplug_fn, s);
    }
    return 0;
}
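
/*
 * Batching sketch (hedged; blk_io_plug()/blk_io_unplug() as the callers'
 * API is an assumption here): luring_do_submit() registers
 * luring_unplug_fn() via blk_io_plug_call(), so a sequence like
 *
 *     blk_io_plug();
 *     ...queue several requests via luring_co_submit()...
 *     blk_io_unplug();   // luring_unplug_fn() runs once -> one ioq_submit()
 *
 * issues a single io_uring_enter(2) syscall for the whole batch, unless
 * in_flight + in_queue reaches MAX_ENTRIES first and forces an early
 * ioq_submit().
 */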
int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
                                  QEMUIOVector *qiov, int type)
{
    int ret;
    AioContext *ctx = qemu_get_current_aio_context();
    LuringState *s = aio_get_linux_io_uring(ctx);
    LuringAIOCB luringcb = {
        .co         = qemu_coroutine_self(),
        .ret        = -EINPROGRESS,
        .qiov       = qiov,
        .is_read    = (type == QEMU_AIO_READ),
    };
    trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                           type);
    ret = luring_do_submit(fd, &luringcb, s, offset, type);

    if (ret < 0) {
        return ret;
    }

    if (luringcb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return luringcb.ret;
}
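
/*
 * Usage sketch (hypothetical caller, not part of this file): a block
 * driver running in coroutine context could issue a read like this:
 *
 *     static int coroutine_fn example_preadv(BlockDriverState *bs, int fd,
 *                                            uint64_t offset,
 *                                            QEMUIOVector *qiov)
 *     {
 *         return luring_co_submit(bs, fd, offset, qiov, QEMU_AIO_READ);
 *     }
 *
 * The coroutine yields inside luring_co_submit() until the cqe arrives and
 * aio_co_wake() resumes it with luringcb.ret filled in.
 */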
void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
    aio_set_fd_handler(old_context, s->ring.ring_fd,
                       NULL, NULL, NULL, NULL, s);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}
void luring_attach_aio_context(LuringState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
    aio_set_fd_handler(s->aio_context, s->ring.ring_fd,
                       qemu_luring_completion_cb, NULL,
                       qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}
LuringState *luring_init(Error **errp)
{
    int rc;
    LuringState *s = g_new0(LuringState, 1);
    struct io_uring *ring = &s->ring;

    trace_luring_init_state(s, sizeof(*s));

    rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
    if (rc < 0) {
        error_setg_errno(errp, errno, "failed to init linux io_uring ring");
        g_free(s);
        return NULL;
    }

    ioq_init(&s->io_q);
    return s;
}
void luring_cleanup(LuringState *s)
{
    io_uring_queue_exit(&s->ring);
    trace_luring_cleanup_state(s);
    g_free(s);
}