/*
 * Linux io_uring support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2019 Aarushi Mehta
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <liburing.h>
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"
#include "trace.h"
/* io_uring ring size */
#define MAX_ENTRIES 128
typedef struct LuringAIOCB {
    Coroutine *co;
    struct io_uring_sqe sqeq;
    ssize_t ret;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(LuringAIOCB) next;

    /*
     * Buffered reads may require resubmission, see
     * luring_resubmit_short_read().
     */
    int total_read;
    QEMUIOVector resubmit_qiov;
} LuringAIOCB;
typedef struct LuringQueue {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;
typedef struct LuringState {
    AioContext *aio_context;

    struct io_uring ring;

    /* io queue for submit at batch.  Protected by AioContext lock. */
    LuringQueue io_q;

    /* I/O completion processing.  Only runs in I/O thread. */
    QEMUBH *completion_bh;
} LuringState;
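
/*
 * Request lifecycle, as implemented below: luring_do_submit() preps an sqe
 * and appends the request to io_q.submit_queue; ioq_submit() copies queued
 * sqes into the ring and calls io_uring_submit(); completions are reaped in
 * luring_process_completions(), which wakes the waiting coroutine.
 */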
/**
 * luring_resubmit:
 *
 * Resubmit a request by appending it to submit_queue.  The caller must ensure
 * that ioq_submit() is called later so that submit_queue requests are started.
 */
static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb)
{
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
}
/**
 * luring_resubmit_short_read:
 *
 * Before Linux commit 9d93a3f5a0c ("io_uring: punt short reads to async
 * context") a buffered I/O request with the start of the file range in the
 * page cache could result in a short read.  Applications need to resubmit the
 * remaining read request.
 *
 * This is a slow path but recent kernels never take it.
 */
static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
                                       int nread)
{
    QEMUIOVector *resubmit_qiov;
    size_t remaining;

    trace_luring_resubmit_short_read(s, luringcb, nread);
    /* Update read position (total_read accumulates across resubmissions) */
    luringcb->total_read += nread;
    remaining = luringcb->qiov->size - luringcb->total_read;

    /* Shorten qiov */
    resubmit_qiov = &luringcb->resubmit_qiov;
    if (resubmit_qiov->iov == NULL) {
        qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov);
    } else {
        qemu_iovec_reset(resubmit_qiov);
    }
    qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read,
                      remaining);

    /*
     * Update sqe: advance the file offset past the bytes already read and
     * point it at the shortened iovec.  Note the offset must be advanced,
     * not overwritten with nread, or reads at non-zero offsets would be
     * resubmitted at the wrong position.
     */
    luringcb->sqeq.off += nread;
    luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
    luringcb->sqeq.len = luringcb->resubmit_qiov.niov;

    luring_resubmit(s, luringcb);
}
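
/*
 * Worked example (illustrative numbers): an 8192-byte read at file offset
 * 4096 completes with nread == 512.  total_read becomes 512, remaining is
 * 7680, resubmit_qiov covers bytes [512, 8192) of the caller's qiov, and
 * sqeq.off advances to 4096 + 512 = 4608 before the request is requeued.
 */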
/**
 * luring_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests, consumes cqes and invokes their callbacks.
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().
 *
 * Function schedules BH completion so it can be called again in a nested
 * event loop.  When there are no events left to complete the BH is being
 * canceled.
 */
static void luring_process_completions(LuringState *s)
{
    struct io_uring_cqe *cqes;
    int total_bytes;
    /*
     * Request completion callbacks can run the nested event loop.
     * Schedule ourselves so the nested event loop will "see" remaining
     * completed requests and process them.  Without this, completion
     * callbacks that wait for other requests using a nested event loop
     * would hang forever.
     *
     * This workaround is needed because io_uring uses poll_wait, which
     * is woken up when new events are added to the uring, thus polling on
     * the same uring fd will block unless more events are received.
     *
     * Other leaf block drivers (drivers that access the data themselves)
     * are networking based, so they poll sockets for data and run the
     * correct coroutine.
     */
    qemu_bh_schedule(s->completion_bh);
    while (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
        LuringAIOCB *luringcb;
        int ret;

        if (!cqes) {
            break;
        }

        luringcb = io_uring_cqe_get_data(cqes);
        ret = cqes->res;
        io_uring_cqe_seen(&s->ring, cqes);
        cqes = NULL;

        /* Change counters one-by-one because we can be nested. */
        s->io_q.in_flight--;
        trace_luring_process_completion(s, luringcb, ret);

        /* total_read is non-zero only for resubmitted read requests */
        total_bytes = ret + luringcb->total_read;

        if (ret < 0) {
            if (ret == -EINTR) {
                luring_resubmit(s, luringcb);
                continue;
            }
        } else if (!luringcb->qiov) {
            goto end;
        } else if (total_bytes == luringcb->qiov->size) {
            ret = 0;
        /* Only read/write */
        } else {
            /* Short Read/Write */
            if (luringcb->is_read) {
                if (ret > 0) {
                    luring_resubmit_short_read(s, luringcb, ret);
                    continue;
                } else {
                    /* Pad with zeroes */
                    qemu_iovec_memset(luringcb->qiov, total_bytes, 0,
                                      luringcb->qiov->size - total_bytes);
                    ret = 0;
                }
            } else {
                ret = -ENOSPC;
            }
        }
end:
        luringcb->ret = ret;
        qemu_iovec_destroy(&luringcb->resubmit_qiov);

        /*
         * If the coroutine is already entered it must be in ioq_submit()
         * and will notice luringcb->ret has been filled in when it
         * eventually runs later. Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        if (!qemu_coroutine_entered(luringcb->co)) {
            aio_co_wake(luringcb->co);
        }
    }
    qemu_bh_cancel(s->completion_bh);
}
static int ioq_submit(LuringState *s)
{
    int ret = 0;
    LuringAIOCB *luringcb, *luringcb_next;

    while (s->io_q.in_queue > 0) {
        /*
         * Try to fetch sqes from the ring for requests waiting in
         * the overflow queue
         */
        QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next,
                              luringcb_next) {
            struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring);
            if (!sqes) {
                break;
            }
            /* Prep sqe for submission */
            *sqes = luringcb->sqeq;
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next);
        }
        ret = io_uring_submit(&s->ring);
        trace_luring_io_uring_submit(s, ret);
        /* Prevent infinite loop if submission is refused */
        if (ret <= 0) {
            if (ret == -EAGAIN || ret == -EINTR) {
                continue;
            }
            break;
        }
        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
    }
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * We can try to complete something just right away if there are
         * still requests in-flight.
         */
        luring_process_completions(s);
    }
    return ret;
}
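
/*
 * Entry point shared by the fd handler, the completion BH and the poll
 * callback below: reap whatever has completed, then (unless submission is
 * plugged) flush any requests still waiting in the overflow queue.
 */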
static void luring_process_completions_and_submit(LuringState *s)
{
    aio_context_acquire(s->aio_context);
    luring_process_completions(s);

    if (!s->io_q.plugged && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}
static void qemu_luring_completion_bh(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}
static void qemu_luring_completion_cb(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}
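
/*
 * Polling callback registered via aio_set_fd_handler().  The event loop
 * calls it in poll mode instead of waiting for an fd notification; returning
 * true indicates that progress was made.
 */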
static bool qemu_luring_poll_cb(void *opaque)
{
    LuringState *s = opaque;

    if (io_uring_cq_ready(&s->ring)) {
        luring_process_completions_and_submit(s);
        return true;
    }

    return false;
}
static void ioq_init(LuringQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->submit_queue);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
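
/*
 * Plugging temporarily defers submission: while io_q.plugged is non-zero,
 * luring_do_submit() only queues requests (unless the queue fills up), and
 * the final luring_io_unplug() flushes them in one ioq_submit() batch.
 */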
void luring_io_plug(BlockDriverState *bs, LuringState *s)
{
    trace_luring_io_plug(s);
    s->io_q.plugged++;
}
void luring_io_unplug(BlockDriverState *bs, LuringState *s)
{
    assert(s->io_q.plugged);
    trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}
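
/*
 * Illustrative usage pattern (hypothetical caller; real callers reach these
 * hooks through the generic block-layer plug/unplug interface):
 *
 *     luring_io_plug(bs, s);
 *     ... submit a batch of requests; sqes pile up in submit_queue ...
 *     luring_io_unplug(bs, s);    // one io_uring_submit() for the batch
 */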
/**
 * luring_do_submit:
 * @fd: file descriptor for I/O
 * @luringcb: AIO control block
 * @s: AIO state
 * @offset: offset for request
 * @type: type of request
 *
 * Fetches sqes from ring, adds to pending queue and preps them
 */
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
                            uint64_t offset, int type)
{
    int ret;
    struct io_uring_sqe *sqes = &luringcb->sqeq;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_uring_prep_readv(sqes, fd, luringcb->qiov->iov,
                            luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n",
                        __func__, type);
        abort();
    }
    io_uring_sqe_set_data(sqes, luringcb);
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) {
        ret = ioq_submit(s);
        trace_luring_do_submit_done(s, ret);
        return ret;
    }
    return 0;
}
int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
                                  uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    LuringAIOCB luringcb = {
        .co         = qemu_coroutine_self(),
        .ret        = -EINPROGRESS,
        .qiov       = qiov,
        .is_read    = (type == QEMU_AIO_READ),
    };
    trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                           type);
    ret = luring_do_submit(fd, &luringcb, s, offset, type);

    if (ret < 0) {
        return ret;
    }

    if (luringcb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return luringcb.ret;
}
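
/*
 * Example call from a hypothetical driver coroutine (illustrative only):
 *
 *     ret = luring_co_submit(bs, s, fd, offset, qiov, QEMU_AIO_READ);
 *     if (ret < 0) {
 *         // request failed to complete; ret is a negative errno
 *     }
 */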
void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
    aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL,
                       s);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}
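
/*
 * Hook the io_uring fd into the AioContext: completions are delivered
 * through qemu_luring_completion_cb() when the fd becomes readable and, in
 * poll mode, through qemu_luring_poll_cb().
 */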
void luring_attach_aio_context(LuringState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
    aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
                       qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s);
}
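
/*
 * Allocate and initialize a LuringState.  On failure the partially
 * constructed state is freed and NULL is returned with @errp set; the caller
 * owns the result and must release it with luring_cleanup().
 */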
LuringState *luring_init(Error **errp)
{
    int rc;
    LuringState *s = g_new0(LuringState, 1);
    struct io_uring *ring = &s->ring;

    trace_luring_init_state(s, sizeof(*s));

    rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
    if (rc < 0) {
        error_setg_errno(errp, errno, "failed to init linux io_uring ring");
        g_free(s);
        return NULL;
    }

    ioq_init(&s->io_q);
    return s;
}
void luring_cleanup(LuringState *s)
{
    io_uring_queue_exit(&s->ring);
    trace_luring_cleanup_state(s);
    g_free(s);
}