/*
 * Linux io_uring support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2019 Aarushi Mehta
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <liburing.h>
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

/* io_uring ring size */
#define MAX_ENTRIES 128

typedef struct LuringAIOCB {
    Coroutine *co;
    struct io_uring_sqe sqeq;
    ssize_t ret;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(LuringAIOCB) next;

    /*
     * Buffered reads may require resubmission, see
     * luring_resubmit_short_read().
     */
    int total_read;
    QEMUIOVector resubmit_qiov;
} LuringAIOCB;

typedef struct LuringQueue {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;

typedef struct LuringState {
    AioContext *aio_context;

    struct io_uring ring;

    /* io queue for submit at batch. Protected by AioContext lock. */
    LuringQueue io_q;

    /* I/O completion processing. Only runs in I/O thread. */
    QEMUBH *completion_bh;
} LuringState;

/**
 * luring_resubmit:
 *
 * Resubmit a request by appending it to submit_queue. The caller must ensure
 * that ioq_submit() is called later so that submit_queue requests are started.
 */
static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb)
{
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
}

/**
 * luring_resubmit_short_read:
 *
 * Short reads are rare but may occur. The remaining read request needs to be
 * resubmitted.
 */
static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
                                       int nread)
{
    QEMUIOVector *resubmit_qiov;
    size_t remaining;

    trace_luring_resubmit_short_read(s, luringcb, nread);

    /* Update read position */
    luringcb->total_read += nread;
    remaining = luringcb->qiov->size - luringcb->total_read;

    /* Shorten qiov */
    resubmit_qiov = &luringcb->resubmit_qiov;
    if (resubmit_qiov->iov == NULL) {
        qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov);
    } else {
        qemu_iovec_reset(resubmit_qiov);
    }
    qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read,
                      remaining);

    /* Update sqe */
    luringcb->sqeq.off += nread;
    luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
    luringcb->sqeq.len = luringcb->resubmit_qiov.niov;

    luring_resubmit(s, luringcb);
}
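
/*
 * Illustrative only: a worked example of the short-read bookkeeping above,
 * with hypothetical numbers (not from a real trace). Assume a 64 KiB readv
 * that has read nothing so far and now completes with nread = 4096:
 *
 *   luringcb->total_read   0 + 4096         = 4096
 *   remaining              65536 - 4096     = 61440
 *   resubmit_qiov          last 61440 bytes of luringcb->qiov
 *   sqeq.off               rebased by +4096
 *   sqeq.addr / sqeq.len   resubmit_qiov.iov / resubmit_qiov.niov
 *
 * The patched sqe is queued via luring_resubmit() and picked up by the next
 * ioq_submit() call.
 */
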
/**
 * luring_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests, consumes cqes and invokes their callbacks.
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().
 *
 * Function schedules BH completion so it can be called again in a nested
 * event loop. When there are no events left to complete, the BH is canceled.
 */
static void luring_process_completions(LuringState *s)
{
    struct io_uring_cqe *cqes;
    int total_bytes;
    /*
     * Request completion callbacks can run the nested event loop.
     * Schedule ourselves so the nested event loop will "see" remaining
     * completed requests and process them. Without this, completion
     * callbacks that wait for other requests using a nested event loop
     * would hang forever.
     *
     * This workaround is needed because io_uring uses poll_wait, which
     * is woken up when new events are added to the uring, thus polling on
     * the same uring fd will block unless more events are received.
     *
     * Other leaf block drivers (drivers that access the data themselves)
     * are networking based, so they poll sockets for data and run the
     * correct coroutine.
     */
    qemu_bh_schedule(s->completion_bh);

    while (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
        LuringAIOCB *luringcb;
        int ret;

        if (!cqes) {
            break;
        }

        luringcb = io_uring_cqe_get_data(cqes);
        ret = cqes->res;
        io_uring_cqe_seen(&s->ring, cqes);
        cqes = NULL;

        /* Change counters one-by-one because we can be nested. */
        s->io_q.in_flight--;
        trace_luring_process_completion(s, luringcb, ret);

        /* total_read is non-zero only for resubmitted read requests */
        total_bytes = ret + luringcb->total_read;

        if (ret < 0) {
            /*
             * Only writev/readv/fsync requests on regular files or host block
             * devices are submitted. Therefore -EAGAIN is not expected but
             * it's known to happen sometimes with Linux SCSI. Submit again
             * and hope the request completes successfully.
             *
             * For more information, see:
             * https://lore.kernel.org/io-uring/20210727165811.284510-3-axboe@kernel.dk/T/#u
             *
             * If the code is changed to submit other types of requests in the
             * future, then this workaround may need to be extended to deal
             * with genuine -EAGAIN results that should not be resubmitted
             * immediately.
             */
            if (ret == -EINTR || ret == -EAGAIN) {
                luring_resubmit(s, luringcb);
                continue;
            }
        } else if (!luringcb->qiov) {
            goto end;
        } else if (total_bytes == luringcb->qiov->size) {
            ret = 0;
            /* Only read/write */
        } else {
            /* Short Read/Write */
            if (luringcb->is_read) {
                if (ret > 0) {
                    luring_resubmit_short_read(s, luringcb, ret);
                    continue;
                } else {
                    /* Pad with zeroes */
                    qemu_iovec_memset(luringcb->qiov, total_bytes, 0,
                                      luringcb->qiov->size - total_bytes);
                    ret = 0;
                }
            } else {
                ret = -ENOSPC;
            }
        }
end:
        luringcb->ret = ret;
        qemu_iovec_destroy(&luringcb->resubmit_qiov);

        /*
         * If the coroutine is already entered it must be in ioq_submit()
         * and will notice luringcb->ret has been filled in when it
         * eventually runs later. Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        if (!qemu_coroutine_entered(luringcb->co)) {
            aio_co_wake(luringcb->co);
        }
    }
    qemu_bh_cancel(s->completion_bh);
}
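
/*
 * A minimal sketch of the nested-event-loop case the completion BH above
 * guards against; example_wait() is hypothetical and not part of this file:
 *
 *   static void example_wait(LuringState *s, bool *done)
 *   {
 *       while (!*done) {
 *           aio_poll(s->aio_context, true);    // nested event loop
 *       }
 *   }
 *
 * If a completion callback blocks like this while further cqes are already
 * queued, the nested aio_poll() would wait on the uring fd forever, because
 * poll_wait only wakes on new events. The scheduled completion_bh keeps the
 * nested loop re-entering luring_process_completions() instead.
 */
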
static int ioq_submit(LuringState *s)
{
    int ret = 0;
    LuringAIOCB *luringcb, *luringcb_next;

    while (s->io_q.in_queue > 0) {
        /*
         * Try to fetch sqes from the ring for requests waiting in
         * the overflow queue
         */
        QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next,
                              luringcb_next) {
            struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring);
            if (!sqes) {
                break;
            }
            /* Prep sqe for submission */
            *sqes = luringcb->sqeq;
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next);
        }
        ret = io_uring_submit(&s->ring);
        trace_luring_io_uring_submit(s, ret);
        /* Prevent infinite loop if submission is refused */
        if (ret <= 0) {
            if (ret == -EAGAIN || ret == -EINTR) {
                continue;
            }
            break;
        }
        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
    }
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * We can try to complete something just right away if there are
         * still requests in-flight.
         */
        luring_process_completions(s);
    }
    return ret;
}

static void luring_process_completions_and_submit(LuringState *s)
{
    aio_context_acquire(s->aio_context);
    luring_process_completions(s);

    if (!s->io_q.plugged && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

static void qemu_luring_completion_bh(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

static void qemu_luring_completion_cb(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

static bool qemu_luring_poll_cb(void *opaque)
{
    LuringState *s = opaque;

    return io_uring_cq_ready(&s->ring);
}

static void qemu_luring_poll_ready(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}

static void ioq_init(LuringQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->submit_queue);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

void luring_io_plug(BlockDriverState *bs, LuringState *s)
{
    trace_luring_io_plug(s);
    s->io_q.plugged++;
}

void luring_io_unplug(BlockDriverState *bs, LuringState *s)
{
    assert(s->io_q.plugged);
    trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}
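
/*
 * A minimal sketch of how a caller batches requests with the plug/unplug
 * API (the caller shown is hypothetical; one coroutine per request):
 *
 *   luring_io_plug(bs, s);    // plugged++, submission deferred
 *   ...spawn request coroutines, each calling luring_co_submit(); while
 *      s->io_q.plugged is non-zero the sqes only accumulate in submit_queue,
 *      unless in_flight + in_queue reaches MAX_ENTRIES...
 *   luring_io_unplug(bs, s);  // --plugged == 0: flush via ioq_submit()
 *
 * Plug calls nest because plugged is a counter, so only the outermost unplug
 * triggers submission.
 */
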
/**
 * luring_do_submit:
 * @fd: file descriptor for I/O
 * @luringcb: AIO control block
 * @s: AIO state
 * @offset: offset for request
 * @type: type of request
 *
 * Fetches sqes from ring, adds to pending queue and preps them
 */
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
                            uint64_t offset, int type)
{
    int ret;
    struct io_uring_sqe *sqes = &luringcb->sqeq;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_uring_prep_readv(sqes, fd, luringcb->qiov->iov,
                            luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n",
                __func__, type);
        abort();
    }
    io_uring_sqe_set_data(sqes, luringcb);

    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) {
        ret = ioq_submit(s);
        trace_luring_do_submit_done(s, ret);
        return ret;
    }
    return 0;
}

int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
                                  uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    LuringAIOCB luringcb = {
        .co         = qemu_coroutine_self(),
        .ret        = -EINPROGRESS,
        .qiov       = qiov,
        .is_read    = (type == QEMU_AIO_READ),
    };
    trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                           type);
    ret = luring_do_submit(fd, &luringcb, s, offset, type);

    if (ret < 0) {
        return ret;
    }

    if (luringcb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return luringcb.ret;
}
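
/*
 * A minimal caller sketch, assuming coroutine context and an initialized
 * LuringState (example_co_preadv() and its arguments are hypothetical):
 *
 *   static int coroutine_fn example_co_preadv(BlockDriverState *bs,
 *                                             LuringState *s, int fd,
 *                                             uint64_t offset,
 *                                             QEMUIOVector *qiov)
 *   {
 *       return luring_co_submit(bs, s, fd, offset, qiov, QEMU_AIO_READ);
 *   }
 *
 * Note that luringcb lives on the coroutine stack, so the request must not
 * outlive that frame: the yield above only resumes once
 * luring_process_completions() has filled in luringcb->ret and woken the
 * coroutine.
 */
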
void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
    aio_set_fd_handler(old_context, s->ring.ring_fd, false,
                       NULL, NULL, NULL, NULL, s);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void luring_attach_aio_context(LuringState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
    aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
                       qemu_luring_completion_cb, NULL,
                       qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}
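
/*
 * How the wiring above behaves at runtime (a descriptive note, not new API):
 * when s->ring.ring_fd becomes readable, the event loop calls
 * qemu_luring_completion_cb(); in polling mode it instead spins on
 * qemu_luring_poll_cb(), whose non-zero return (the count of ready cqes)
 * triggers qemu_luring_poll_ready(). Both paths drain completions via
 * luring_process_completions_and_submit().
 */
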
LuringState *luring_init(Error **errp)
{
    int rc;
    LuringState *s = g_new0(LuringState, 1);
    struct io_uring *ring = &s->ring;

    trace_luring_init_state(s, sizeof(*s));

    rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
    if (rc < 0) {
        error_setg_errno(errp, errno, "failed to init linux io_uring ring");
        g_free(s);
        return NULL;
    }

    ioq_init(&s->io_q);
#ifdef CONFIG_LIBURING_REGISTER_RING_FD
    if (io_uring_register_ring_fd(&s->ring) < 0) {
        /*
         * Only warn about this error: we will fallback to the non-optimized
         * io_uring operations.
         */
        warn_report("failed to register linux io_uring ring file descriptor");
    }
#endif

    return s;
}

void luring_cleanup(LuringState *s)
{
    io_uring_queue_exit(&s->ring);
    trace_luring_cleanup_state(s);
    g_free(s);
}
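
/*
 * A minimal lifecycle sketch tying the pieces together (error handling
 * trimmed; "ctx" is a hypothetical AioContext owned by the caller):
 *
 *   Error *local_err = NULL;
 *   LuringState *s = luring_init(&local_err);
 *   if (!s) {
 *       error_report_err(local_err);
 *       return;
 *   }
 *   luring_attach_aio_context(s, ctx);    // install fd handler and BH
 *   ...submit I/O from coroutines via luring_co_submit()...
 *   luring_detach_aio_context(s, ctx);    // must happen before cleanup
 *   luring_cleanup(s);
 */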