/*
 * Linux io_uring support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2019 Aarushi Mehta
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <liburing.h>
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"
#include "trace.h"

/* Only used for assertions. */
#include "qemu/coroutine_int.h"

/* io_uring ring size */
#define MAX_ENTRIES 128
typedef struct LuringAIOCB {
    Coroutine *co;
    struct io_uring_sqe sqeq;
    ssize_t ret;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(LuringAIOCB) next;

    /*
     * Buffered reads may require resubmission, see
     * luring_resubmit_short_read().
     */
    int total_read;
    QEMUIOVector resubmit_qiov;
} LuringAIOCB;

typedef struct LuringQueue {
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;

typedef struct LuringState {
    AioContext *aio_context;

    struct io_uring ring;

    /* No locking required, only accessed from AioContext home thread */
    LuringQueue io_q;

    QEMUBH *completion_bh;
} LuringState;
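/*
 * Note: a LuringAIOCB lives on the stack of the coroutine that calls
 * luring_co_submit(), so it needs no separate allocation or freeing; the
 * coroutine simply yields until luringcb->ret has been filled in.
 */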
/**
 * luring_resubmit:
 *
 * Resubmit a request by appending it to submit_queue. The caller must ensure
 * that ioq_submit() is called later so that submit_queue requests are started.
 */
static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb)
{
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
}
/**
 * luring_resubmit_short_read:
 *
 * Short reads are rare but may occur. The remaining read request needs to be
 * resubmitted.
 */
static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
                                       int nread)
{
    QEMUIOVector *resubmit_qiov;
    size_t remaining;

    trace_luring_resubmit_short_read(s, luringcb, nread);

    /* Update read position */
    luringcb->total_read += nread;
    remaining = luringcb->qiov->size - luringcb->total_read;

    /* Shorten qiov */
    resubmit_qiov = &luringcb->resubmit_qiov;
    if (resubmit_qiov->iov == NULL) {
        qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov);
    } else {
        qemu_iovec_reset(resubmit_qiov);
    }
    qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read,
                      remaining);

    /* Update sqe */
    luringcb->sqeq.off += nread;
    luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
    luringcb->sqeq.len = luringcb->resubmit_qiov.niov;

    luring_resubmit(s, luringcb);
}
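/*
 * For example, if a 64 KiB read returns only 16 KiB: total_read becomes
 * 16 KiB, remaining is 48 KiB, resubmit_qiov is rebuilt to cover the last
 * 48 KiB of the original qiov, the sqe offset advances by 16 KiB, and the
 * request is queued again. This repeats until total_read reaches qiov->size,
 * or a zero-byte read pads the tail with zeroes in
 * luring_process_completions().
 */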
/**
 * luring_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests, consumes cqes and invokes their callbacks.
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().
 *
 * The function schedules a completion BH so that it can be called again in a
 * nested event loop. When there are no events left to complete, the BH is
 * canceled.
 */
static void luring_process_completions(LuringState *s)
{
    struct io_uring_cqe *cqes;
    int total_bytes;

    /*
     * Request completion callbacks can run the nested event loop.
     * Schedule ourselves so the nested event loop will "see" remaining
     * completed requests and process them. Without this, completion
     * callbacks that wait for other requests using a nested event loop
     * would hang forever.
     *
     * This workaround is needed because io_uring uses poll_wait, which
     * is woken up when new events are added to the uring, thus polling on
     * the same uring fd will block unless more events are received.
     *
     * Other leaf block drivers (drivers that access the data themselves)
     * are networking based, so they poll sockets for data and run the
     * correct coroutine.
     */
    qemu_bh_schedule(s->completion_bh);

    while (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
        LuringAIOCB *luringcb;
        int ret;

        if (!cqes) {
            break;
        }

        luringcb = io_uring_cqe_get_data(cqes);
        ret = cqes->res;
        io_uring_cqe_seen(&s->ring, cqes);
        cqes = NULL;

        /* Change counters one-by-one because we can be nested. */
        s->io_q.in_flight--;
        trace_luring_process_completion(s, luringcb, ret);

        /* total_read is non-zero only for resubmitted read requests */
        total_bytes = ret + luringcb->total_read;

        if (ret < 0) {
            /*
             * Only writev/readv/fsync requests on regular files or host block
             * devices are submitted. Therefore -EAGAIN is not expected but it's
             * known to happen sometimes with Linux SCSI. Submit again and hope
             * the request completes successfully.
             *
             * For more information, see:
             * https://lore.kernel.org/io-uring/20210727165811.284510-3-axboe@kernel.dk/T/#u
             *
             * If the code is changed to submit other types of requests in the
             * future, then this workaround may need to be extended to deal with
             * genuine -EAGAIN results that should not be resubmitted
             * immediately.
             */
            if (ret == -EINTR || ret == -EAGAIN) {
                luring_resubmit(s, luringcb);
                continue;
            }
        } else if (!luringcb->qiov) {
            goto end;
        } else if (total_bytes == luringcb->qiov->size) {
            ret = 0;
            /* Only read/write */
        } else {
            /* Short Read/Write */
            if (luringcb->is_read) {
                if (ret > 0) {
                    luring_resubmit_short_read(s, luringcb, ret);
                    continue;
                } else {
                    /* Pad with zeroes */
                    qemu_iovec_memset(luringcb->qiov, total_bytes, 0,
                                      luringcb->qiov->size - total_bytes);
                    ret = 0;
                }
            } else {
                ret = -ENOSPC;
            }
        }
end:
        luringcb->ret = ret;
        qemu_iovec_destroy(&luringcb->resubmit_qiov);

        /*
         * If the coroutine is already entered it must be in ioq_submit()
         * and will notice luringcb->ret has been filled in when it
         * eventually runs later. Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        assert(luringcb->co->ctx == s->aio_context);
        if (!qemu_coroutine_entered(luringcb->co)) {
            aio_co_wake(luringcb->co);
        }
    }

    qemu_bh_cancel(s->completion_bh);
}
static int ioq_submit(LuringState *s)
{
    int ret = 0;
    LuringAIOCB *luringcb, *luringcb_next;

    while (s->io_q.in_queue > 0) {
        /*
         * Try to fetch sqes from the ring for requests waiting in
         * the overflow queue
         */
        QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next,
                              luringcb_next) {
            struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring);
            if (!sqes) {
                break;
            }
            /* Prep sqe for submission */
            *sqes = luringcb->sqeq;
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next);
        }
        ret = io_uring_submit(&s->ring);
        trace_luring_io_uring_submit(s, ret);
        /* Prevent infinite loop if submission is refused */
        if (ret <= 0) {
            if (ret == -EAGAIN || ret == -EINTR) {
                continue;
            }
            break;
        }
        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
    }
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * We can try to complete something just right away if there are
         * still requests in-flight.
         */
        luring_process_completions(s);
    }
    return ret;
}
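/*
 * For example, with in_queue == 3 and only two free sqes in the ring, the
 * inner loop above preps two requests, io_uring_submit() returns 2, so
 * in_flight rises by two and in_queue drops to one; the outer loop then
 * retries the remaining request. io_q.blocked ends up set only if requests
 * are still waiting once submission stops making progress.
 */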
static void luring_process_completions_and_submit(LuringState *s)
{
    luring_process_completions(s);

    if (s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}

static void qemu_luring_completion_bh(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}

static void qemu_luring_completion_cb(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}

static bool qemu_luring_poll_cb(void *opaque)
{
    LuringState *s = opaque;

    return io_uring_cq_ready(&s->ring);
}

static void qemu_luring_poll_ready(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}
static void ioq_init(LuringQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->submit_queue);
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

static void luring_unplug_fn(void *opaque)
{
    LuringState *s = opaque;

    trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
                           s->io_q.in_flight);
    if (!s->io_q.blocked && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}
/**
 * luring_do_submit:
 * @fd: file descriptor for I/O
 * @luringcb: AIO control block
 * @s: AIO state
 * @offset: offset for request
 * @type: type of request
 *
 * Preps the request's sqe, appends it to the pending queue, and submits the
 * queue once it fills up or when the block layer unplugs.
 */
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
                            uint64_t offset, int type)
{
    int ret;
    struct io_uring_sqe *sqes = &luringcb->sqeq;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_ZONE_APPEND:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_uring_prep_readv(sqes, fd, luringcb->qiov->iov,
                            luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n",
                __func__, type);
        abort();
    }
    io_uring_sqe_set_data(sqes, luringcb);

    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.in_queue,
                           s->io_q.in_flight);
    if (!s->io_q.blocked) {
        if (s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES) {
            ret = ioq_submit(s);
            trace_luring_do_submit_done(s, ret);
            return ret;
        }

        blk_io_plug_call(luring_unplug_fn, s);
    }
    return 0;
}
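/*
 * Batching note: while the block layer holds a plug, requests accumulate in
 * submit_queue and luring_unplug_fn() submits them in one batch at unplug
 * time; submission is forced early only when in_flight + in_queue reaches
 * MAX_ENTRIES.
 */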
int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
                                  QEMUIOVector *qiov, int type)
{
    int ret;
    AioContext *ctx = qemu_get_current_aio_context();
    LuringState *s = aio_get_linux_io_uring(ctx);
    LuringAIOCB luringcb = {
        .co         = qemu_coroutine_self(),
        .ret        = -EINPROGRESS,
        .qiov       = qiov,
        .is_read    = (type == QEMU_AIO_READ),
    };
    trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                           type);
    ret = luring_do_submit(fd, &luringcb, s, offset, type);

    if (ret < 0) {
        return ret;
    }

    if (luringcb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return luringcb.ret;
}
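/*
 * A minimal caller sketch (example_co_preadv() is hypothetical, for
 * illustration only): from coroutine context with io_uring enabled, a driver
 * reads into a QEMUIOVector with a single call that yields until the cqe
 * arrives:
 *
 *     static int coroutine_fn example_co_preadv(BlockDriverState *bs, int fd,
 *                                               uint64_t offset,
 *                                               QEMUIOVector *qiov)
 *     {
 *         return luring_co_submit(bs, fd, offset, qiov, QEMU_AIO_READ);
 *     }
 *
 * The return value is 0 on success or a negative errno.
 */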
void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
    aio_set_fd_handler(old_context, s->ring.ring_fd,
                       NULL, NULL, NULL, NULL, s);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void luring_attach_aio_context(LuringState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
    aio_set_fd_handler(s->aio_context, s->ring.ring_fd,
                       qemu_luring_completion_cb, NULL,
                       qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}
LuringState *luring_init(Error **errp)
{
    int rc;
    LuringState *s = g_new0(LuringState, 1);
    struct io_uring *ring = &s->ring;

    trace_luring_init_state(s, sizeof(*s));

    rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
    if (rc < 0) {
        /* io_uring_queue_init() returns a negative errno on failure */
        error_setg_errno(errp, -rc, "failed to init linux io_uring ring");
        g_free(s);
        return NULL;
    }

    ioq_init(&s->io_q);
    return s;
}
void luring_cleanup(LuringState *s)
{
    io_uring_queue_exit(&s->ring);
    trace_luring_cleanup_state(s);
    g_free(s);
}
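/*
 * Lifecycle sketch (illustrative; ctx stands in for whichever AioContext the
 * caller owns): the state is created once, attached before any submission,
 * and torn down in reverse order:
 *
 *     Error *local_err = NULL;
 *     LuringState *s = luring_init(&local_err);
 *     if (!s) {
 *         ... report local_err ...
 *     }
 *     luring_attach_aio_context(s, ctx);
 *     ... luring_co_submit() from coroutines running in ctx ...
 *     luring_detach_aio_context(s, ctx);
 *     luring_cleanup(s);
 */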