[qemu.git] / block / blkio.c
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * libblkio BlockDriver
 *
 * Copyright Red Hat, Inc.
 *
 * Author:
 *   Stefan Hajnoczi <stefanha@redhat.com>
 */

#include "qemu/osdep.h"
#include <blkio.h>
#include "block/block_int.h"
#include "exec/memory.h"
#include "exec/cpu-common.h" /* for qemu_ram_get_fd() */
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qapi/qmp/qdict.h"
#include "qemu/module.h"
#include "exec/memory.h" /* for ram_block_discard_disable() */

/*
 * Keep the QEMU BlockDriver names identical to the libblkio driver names.
 * Using macros instead of typing out the string literals avoids typos.
 */
#define DRIVER_IO_URING "io_uring"
#define DRIVER_NVME_IO_URING "nvme-io_uring"
#define DRIVER_VIRTIO_BLK_VHOST_USER "virtio-blk-vhost-user"
#define DRIVER_VIRTIO_BLK_VHOST_VDPA "virtio-blk-vhost-vdpa"

/*
 * Allocated bounce buffers are kept in a list sorted by buffer address.
 */
typedef struct BlkioBounceBuf {
    QLIST_ENTRY(BlkioBounceBuf) next;

    /* The bounce buffer */
    struct iovec buf;
} BlkioBounceBuf;

typedef struct {
    /*
     * libblkio is not thread-safe so this lock protects ->blkio and
     * ->blkioq.
     */
    QemuMutex blkio_lock;
    struct blkio *blkio;
    struct blkioq *blkioq; /* make this multi-queue in the future... */
    int completion_fd;

    /*
     * Polling fetches the next completion into this field.
     *
     * No lock is necessary since only one thread calls aio_poll() and invokes
     * fd and poll handlers.
     */
    struct blkio_completion poll_completion;

    /*
     * Protects ->bounce_pool, ->bounce_bufs, ->bounce_available.
     *
     * Lock ordering: ->bounce_lock before ->blkio_lock.
     */
    CoMutex bounce_lock;

    /* Bounce buffer pool */
    struct blkio_mem_region bounce_pool;

    /* Sorted list of allocated bounce buffers */
    QLIST_HEAD(, BlkioBounceBuf) bounce_bufs;

    /* Queue for coroutines waiting for bounce buffer space */
    CoQueue bounce_available;

    /* The value of the "mem-region-alignment" property */
    size_t mem_region_alignment;

    /* Can we skip adding/deleting blkio_mem_regions? */
    bool needs_mem_regions;

    /* Are file descriptors necessary for blkio_mem_regions? */
    bool needs_mem_region_fd;

    /* Are madvise(MADV_DONTNEED)-style operations unavailable? */
    bool may_pin_mem_regions;
} BDRVBlkioState;

/* Called with s->bounce_lock held */
static int blkio_resize_bounce_pool(BDRVBlkioState *s, int64_t bytes)
{
    /* There can be no allocated bounce buffers during resize */
    assert(QLIST_EMPTY(&s->bounce_bufs));

    /* Pad size to reduce frequency of resize calls */
    bytes += 128 * 1024;

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        int ret;

        if (s->bounce_pool.addr) {
            blkio_unmap_mem_region(s->blkio, &s->bounce_pool);
            blkio_free_mem_region(s->blkio, &s->bounce_pool);
            memset(&s->bounce_pool, 0, sizeof(s->bounce_pool));
        }

        /* Automatically freed when s->blkio is destroyed */
        ret = blkio_alloc_mem_region(s->blkio, &s->bounce_pool, bytes);
        if (ret < 0) {
            return ret;
        }

        ret = blkio_map_mem_region(s->blkio, &s->bounce_pool);
        if (ret < 0) {
            blkio_free_mem_region(s->blkio, &s->bounce_pool);
            memset(&s->bounce_pool, 0, sizeof(s->bounce_pool));
            return ret;
        }
    }

    return 0;
}

/* Called with s->bounce_lock held */
static bool
blkio_do_alloc_bounce_buffer(BDRVBlkioState *s, BlkioBounceBuf *bounce,
                             int64_t bytes)
{
    void *addr = s->bounce_pool.addr;
    BlkioBounceBuf *cur = NULL;
    BlkioBounceBuf *prev = NULL;
    ptrdiff_t space;

    /*
     * This is just a linear search over the holes between requests. An
     * efficient allocator would be nice.
     */
    QLIST_FOREACH(cur, &s->bounce_bufs, next) {
        space = cur->buf.iov_base - addr;
        if (bytes <= space) {
            QLIST_INSERT_BEFORE(cur, bounce, next);
            bounce->buf.iov_base = addr;
            bounce->buf.iov_len = bytes;
            return true;
        }

        addr = cur->buf.iov_base + cur->buf.iov_len;
        prev = cur;
    }

    /* Is there space after the last request? */
    space = s->bounce_pool.addr + s->bounce_pool.len - addr;
    if (bytes > space) {
        return false;
    }

    if (prev) {
        QLIST_INSERT_AFTER(prev, bounce, next);
    } else {
        QLIST_INSERT_HEAD(&s->bounce_bufs, bounce, next);
    }

    bounce->buf.iov_base = addr;
    bounce->buf.iov_len = bytes;
    return true;
}

static int coroutine_fn
blkio_alloc_bounce_buffer(BDRVBlkioState *s, BlkioBounceBuf *bounce,
                          int64_t bytes)
{
    /*
     * Ensure fairness: first time around we join the back of the queue,
     * subsequently we join the front so we don't lose our place.
     */
    CoQueueWaitFlags wait_flags = 0;

    QEMU_LOCK_GUARD(&s->bounce_lock);

    /* Ensure fairness: don't even try if other requests are already waiting */
    if (!qemu_co_queue_empty(&s->bounce_available)) {
        qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock,
                                 wait_flags);
        wait_flags = CO_QUEUE_WAIT_FRONT;
    }

    while (true) {
        if (blkio_do_alloc_bounce_buffer(s, bounce, bytes)) {
            /* Kick the next queued request since there may be space */
            qemu_co_queue_next(&s->bounce_available);
            return 0;
        }

        /*
         * If there are no in-flight requests then the pool was simply too
         * small.
         */
        if (QLIST_EMPTY(&s->bounce_bufs)) {
            bool ok;
            int ret;

            ret = blkio_resize_bounce_pool(s, bytes);
            if (ret < 0) {
                /* Kick the next queued request since that may fail too */
                qemu_co_queue_next(&s->bounce_available);
                return ret;
            }

            ok = blkio_do_alloc_bounce_buffer(s, bounce, bytes);
            assert(ok); /* must have space this time */
            return 0;
        }

        qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock,
                                 wait_flags);
        wait_flags = CO_QUEUE_WAIT_FRONT;
    }
}

static void coroutine_fn blkio_free_bounce_buffer(BDRVBlkioState *s,
                                                  BlkioBounceBuf *bounce)
{
    QEMU_LOCK_GUARD(&s->bounce_lock);

    QLIST_REMOVE(bounce, next);

    /* Wake up waiting coroutines since space may now be available */
    qemu_co_queue_next(&s->bounce_available);
}

/* For async to .bdrv_co_*() conversion */
typedef struct {
    Coroutine *coroutine;
    int ret;
} BlkioCoData;
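
/*
 * Completion fd handler: reap completed requests from the libblkio queue and
 * wake the coroutines that submitted them.
 */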
static void blkio_completion_fd_read(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVBlkioState *s = bs->opaque;
    uint64_t val;
    int ret;

    /* Polling may have already fetched a completion */
    if (s->poll_completion.user_data != NULL) {
        BlkioCoData *cod = s->poll_completion.user_data;
        cod->ret = s->poll_completion.ret;

        /* Clear it in case aio_co_wake() enters a nested event loop */
        s->poll_completion.user_data = NULL;

        aio_co_wake(cod->coroutine);
    }

    /* Reset completion fd status */
    ret = read(s->completion_fd, &val, sizeof(val));

    /* Ignore errors, there's nothing we can do */
    (void)ret;

    /*
     * Reading one completion at a time makes nested event loop re-entrancy
     * simple. Change this loop to get multiple completions in one go if it
     * becomes a performance bottleneck.
     */
    while (true) {
        struct blkio_completion completion;

        WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
            ret = blkioq_do_io(s->blkioq, &completion, 0, 1, NULL);
        }
        if (ret != 1) {
            break;
        }

        BlkioCoData *cod = completion.user_data;
        cod->ret = completion.ret;
        aio_co_wake(cod->coroutine);
    }
}
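
/*
 * Polling callback: try to fetch one completion without sleeping. A fetched
 * completion is stashed in s->poll_completion for the poll-ready handler.
 */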
static bool blkio_completion_fd_poll(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVBlkioState *s = bs->opaque;
    int ret;

    /* Just in case we already fetched a completion */
    if (s->poll_completion.user_data != NULL) {
        return true;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkioq_do_io(s->blkioq, &s->poll_completion, 0, 1, NULL);
    }
    return ret == 1;
}

static void blkio_completion_fd_poll_ready(void *opaque)
{
    blkio_completion_fd_read(opaque);
}
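
/* Monitor the completion fd in the given AioContext, with polling enabled */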
static void blkio_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    BDRVBlkioState *s = bs->opaque;

    aio_set_fd_handler(new_context,
                       s->completion_fd,
                       false,
                       blkio_completion_fd_read,
                       NULL,
                       blkio_completion_fd_poll,
                       blkio_completion_fd_poll_ready,
                       bs);
}

static void blkio_detach_aio_context(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;

    aio_set_fd_handler(bdrv_get_aio_context(bs),
                       s->completion_fd,
                       false, NULL, NULL, NULL, NULL, NULL);
}

/* Call with s->blkio_lock held to submit I/O after enqueuing a new request */
static void blkio_submit_io(BlockDriverState *bs)
{
    if (qatomic_read(&bs->io_plugged) == 0) {
        BDRVBlkioState *s = bs->opaque;

        blkioq_do_io(s->blkioq, NULL, 0, 0, NULL);
    }
}

static int coroutine_fn
blkio_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVBlkioState *s = bs->opaque;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_discard(s->blkioq, offset, bytes, &cod, 0);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();
    return cod.ret;
}
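
/*
 * Read into qiov. If the libblkio driver needs mapped memory regions and the
 * buffer was not registered with bdrv_register_buf(), the data goes through a
 * bounce buffer and is copied out on completion.
 */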
static int coroutine_fn
blkio_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };
    BDRVBlkioState *s = bs->opaque;
    bool use_bounce_buffer =
        s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
    BlkioBounceBuf bounce;
    struct iovec *iov = qiov->iov;
    int iovcnt = qiov->niov;

    if (use_bounce_buffer) {
        int ret = blkio_alloc_bounce_buffer(s, &bounce, bytes);
        if (ret < 0) {
            return ret;
        }

        iov = &bounce.buf;
        iovcnt = 1;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_readv(s->blkioq, offset, iov, iovcnt, &cod, 0);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();

    if (use_bounce_buffer) {
        if (cod.ret == 0) {
            qemu_iovec_from_buf(qiov, 0,
                                bounce.buf.iov_base,
                                bounce.buf.iov_len);
        }

        blkio_free_bounce_buffer(s, &bounce);
    }

    return cod.ret;
}

static int coroutine_fn blkio_co_pwritev(BlockDriverState *bs, int64_t offset,
        int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    uint32_t blkio_flags = (flags & BDRV_REQ_FUA) ? BLKIO_REQ_FUA : 0;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };
    BDRVBlkioState *s = bs->opaque;
    bool use_bounce_buffer =
        s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
    BlkioBounceBuf bounce;
    struct iovec *iov = qiov->iov;
    int iovcnt = qiov->niov;

    if (use_bounce_buffer) {
        int ret = blkio_alloc_bounce_buffer(s, &bounce, bytes);
        if (ret < 0) {
            return ret;
        }

        qemu_iovec_to_buf(qiov, 0, bounce.buf.iov_base, bytes);
        iov = &bounce.buf;
        iovcnt = 1;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_writev(s->blkioq, offset, iov, iovcnt, &cod, blkio_flags);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();

    if (use_bounce_buffer) {
        blkio_free_bounce_buffer(s, &bounce);
    }

    return cod.ret;
}

static int coroutine_fn blkio_co_flush(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_flush(s->blkioq, &cod, 0);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();
    return cod.ret;
}

static int coroutine_fn blkio_co_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    BDRVBlkioState *s = bs->opaque;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };
    uint32_t blkio_flags = 0;

    if (flags & BDRV_REQ_FUA) {
        blkio_flags |= BLKIO_REQ_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        blkio_flags |= BLKIO_REQ_NO_UNMAP;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        blkio_flags |= BLKIO_REQ_NO_FALLBACK;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_write_zeroes(s->blkioq, offset, bytes, &cod, blkio_flags);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();
    return cod.ret;
}

static void blkio_io_unplug(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkio_submit_io(bs);
    }
}

typedef enum {
    BMRR_OK,
    BMRR_SKIP,
    BMRR_FAIL,
} BlkioMemRegionResult;

/*
 * Produce a struct blkio_mem_region for a given address and size.
 *
 * This function produces identical results when called multiple times with the
 * same arguments. This property is necessary because blkio_unmap_mem_region()
 * must receive the same struct blkio_mem_region field values that were passed
 * to blkio_map_mem_region().
 */
static BlkioMemRegionResult
blkio_mem_region_from_host(BlockDriverState *bs,
                           void *host, size_t size,
                           struct blkio_mem_region *region,
                           Error **errp)
{
    BDRVBlkioState *s = bs->opaque;
    int fd = -1;
    ram_addr_t fd_offset = 0;

    if (((uintptr_t)host | size) % s->mem_region_alignment) {
        error_setg(errp, "unaligned buf %p with size %zu", host, size);
        return BMRR_FAIL;
    }

    /* Attempt to find the fd for the underlying memory */
    if (s->needs_mem_region_fd) {
        RAMBlock *ram_block;
        RAMBlock *end_block;
        ram_addr_t offset;

        /*
         * bdrv_register_buf() is called with the BQL held so mr lives at least
         * until this function returns.
         */
        ram_block = qemu_ram_block_from_host(host, false, &fd_offset);
        if (ram_block) {
            fd = qemu_ram_get_fd(ram_block);
        }
        if (fd == -1) {
            /*
             * Ideally every RAMBlock would have an fd. pc-bios and other
             * things don't. Luckily they are usually not I/O buffers and we
             * can just ignore them.
             */
            return BMRR_SKIP;
        }

        /* Make sure the fd covers the entire range */
        end_block = qemu_ram_block_from_host(host + size - 1, false, &offset);
        if (ram_block != end_block) {
            error_setg(errp, "registered buffer at %p with size %zu extends "
                       "beyond RAMBlock", host, size);
            return BMRR_FAIL;
        }
    }

    *region = (struct blkio_mem_region){
        .addr = host,
        .len = size,
        .fd = fd,
        .fd_offset = fd_offset,
    };
    return BMRR_OK;
}
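
/*
 * Map a guest memory area with libblkio so that requests can reference it
 * directly instead of going through bounce buffers.
 */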
static bool blkio_register_buf(BlockDriverState *bs, void *host, size_t size,
                               Error **errp)
{
    BDRVBlkioState *s = bs->opaque;
    struct blkio_mem_region region;
    BlkioMemRegionResult region_result;
    int ret;

    /*
     * Mapping memory regions conflicts with RAM discard (virtio-mem) when
     * there is pinning, so only do it when necessary.
     */
    if (!s->needs_mem_regions && s->may_pin_mem_regions) {
        return true;
    }

    region_result = blkio_mem_region_from_host(bs, host, size, &region, errp);
    if (region_result == BMRR_SKIP) {
        return true;
    } else if (region_result != BMRR_OK) {
        return false;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkio_map_mem_region(s->blkio, &region);
    }

    if (ret < 0) {
        error_setg(errp, "Failed to add blkio mem region %p with size %zu: %s",
                   host, size, blkio_get_error_msg());
        return false;
    }
    return true;
}

static void blkio_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BDRVBlkioState *s = bs->opaque;
    struct blkio_mem_region region;

    /* See blkio_register_buf() */
    if (!s->needs_mem_regions && s->may_pin_mem_regions) {
        return;
    }

    if (blkio_mem_region_from_host(bs, host, size, &region, NULL) != BMRR_OK) {
        return;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkio_unmap_mem_region(s->blkio, &region);
    }
}
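
/*
 * Driver-specific open helpers: translate QEMU options and flags into
 * libblkio properties before the device is connected.
 */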
static int blkio_io_uring_open(BlockDriverState *bs, QDict *options, int flags,
                               Error **errp)
{
    const char *filename = qdict_get_str(options, "filename");
    BDRVBlkioState *s = bs->opaque;
    int ret;

    ret = blkio_set_str(s->blkio, "path", filename);
    qdict_del(options, "filename");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (flags & BDRV_O_NOCACHE) {
        ret = blkio_set_bool(s->blkio, "direct", true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set direct: %s",
                             blkio_get_error_msg());
            return ret;
        }
    }

    return 0;
}

static int blkio_nvme_io_uring(BlockDriverState *bs, QDict *options, int flags,
                               Error **errp)
{
    const char *filename = qdict_get_str(options, "filename");
    BDRVBlkioState *s = bs->opaque;
    int ret;

    ret = blkio_set_str(s->blkio, "path", filename);
    qdict_del(options, "filename");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (!(flags & BDRV_O_NOCACHE)) {
        error_setg(errp, "cache.direct=off is not supported");
        return -EINVAL;
    }

    return 0;
}

static int blkio_virtio_blk_common_open(BlockDriverState *bs,
        QDict *options, int flags, Error **errp)
{
    const char *path = qdict_get_try_str(options, "path");
    BDRVBlkioState *s = bs->opaque;
    int ret;

    if (!path) {
        error_setg(errp, "missing 'path' option");
        return -EINVAL;
    }

    ret = blkio_set_str(s->blkio, "path", path);
    qdict_del(options, "path");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (!(flags & BDRV_O_NOCACHE)) {
        error_setg(errp, "cache.direct=off is not supported");
        return -EINVAL;
    }
    return 0;
}
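
/*
 * Common open path: create the libblkio instance, apply driver-specific
 * options, query memory-region requirements, and start queue 0.
 */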
static int blkio_file_open(BlockDriverState *bs, QDict *options, int flags,
                           Error **errp)
{
    const char *blkio_driver = bs->drv->protocol_name;
    BDRVBlkioState *s = bs->opaque;
    int ret;

    ret = blkio_create(blkio_driver, &s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_create failed: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (strcmp(blkio_driver, DRIVER_IO_URING) == 0) {
        ret = blkio_io_uring_open(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, DRIVER_NVME_IO_URING) == 0) {
        ret = blkio_nvme_io_uring(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_USER) == 0) {
        ret = blkio_virtio_blk_common_open(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_VDPA) == 0) {
        ret = blkio_virtio_blk_common_open(bs, options, flags, errp);
    } else {
        g_assert_not_reached();
    }
    if (ret < 0) {
        blkio_destroy(&s->blkio);
        return ret;
    }

    if (!(flags & BDRV_O_RDWR)) {
        ret = blkio_set_bool(s->blkio, "read-only", true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set read-only: %s",
                             blkio_get_error_msg());
            blkio_destroy(&s->blkio);
            return ret;
        }
    }

    ret = blkio_connect(s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_connect failed: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio,
                         "needs-mem-regions",
                         &s->needs_mem_regions);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get needs-mem-regions: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio,
                         "needs-mem-region-fd",
                         &s->needs_mem_region_fd);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get needs-mem-region-fd: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_uint64(s->blkio,
                           "mem-region-alignment",
                           &s->mem_region_alignment);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get mem-region-alignment: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio,
                         "may-pin-mem-regions",
                         &s->may_pin_mem_regions);
    if (ret < 0) {
        /* Be conservative (assume pinning) if the property is not supported */
        s->may_pin_mem_regions = s->needs_mem_regions;
    }

    /*
     * Notify if libblkio drivers pin memory and prevent features like
     * virtio-mem from working.
     */
    if (s->may_pin_mem_regions) {
        ret = ram_block_discard_disable(true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "ram_block_discard_disable() failed");
            blkio_destroy(&s->blkio);
            return ret;
        }
    }

    ret = blkio_start(s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_start failed: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        if (s->may_pin_mem_regions) {
            ram_block_discard_disable(false);
        }
        return ret;
    }

    bs->supported_write_flags = BDRV_REQ_FUA | BDRV_REQ_REGISTERED_BUF;
    bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP |
                               BDRV_REQ_NO_FALLBACK;

    qemu_mutex_init(&s->blkio_lock);
    qemu_co_mutex_init(&s->bounce_lock);
    qemu_co_queue_init(&s->bounce_available);
    QLIST_INIT(&s->bounce_bufs);
    s->blkioq = blkio_get_queue(s->blkio, 0);
    s->completion_fd = blkioq_get_completion_fd(s->blkioq);

    blkio_attach_aio_context(bs, bdrv_get_aio_context(bs));
    return 0;
}

static void blkio_close(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;

    /* There is no destroy() API for s->bounce_lock */

    qemu_mutex_destroy(&s->blkio_lock);
    blkio_detach_aio_context(bs);
    blkio_destroy(&s->blkio);

    if (s->may_pin_mem_regions) {
        ram_block_discard_disable(false);
    }
}

static int64_t blkio_getlength(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;
    uint64_t capacity;
    int ret;

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkio_get_uint64(s->blkio, "capacity", &capacity);
    }
    if (ret < 0) {
        /* Propagate the negative errno rather than a positive value */
        return ret;
    }

    return capacity;
}

static int blkio_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    return 0;
}
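
/* Fill in bs->bl.* limits from the corresponding libblkio properties */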
static void blkio_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVBlkioState *s = bs->opaque;
    QEMU_LOCK_GUARD(&s->blkio_lock);
    int value;
    int ret;

    ret = blkio_get_int(s->blkio, "request-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"request-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.request_alignment = value;
    if (bs->bl.request_alignment < 1 ||
        bs->bl.request_alignment >= INT_MAX ||
        !is_power_of_2(bs->bl.request_alignment)) {
        error_setg(errp, "invalid \"request-alignment\" value %" PRIu32 ", "
                   "must be a power of 2 less than INT_MAX",
                   bs->bl.request_alignment);
        return;
    }

    ret = blkio_get_int(s->blkio, "optimal-io-size", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"optimal-io-size\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.opt_transfer = value;
    if (bs->bl.opt_transfer > INT_MAX ||
        (bs->bl.opt_transfer % bs->bl.request_alignment)) {
        error_setg(errp, "invalid \"optimal-io-size\" value %" PRIu32 ", must "
                   "be a multiple of %" PRIu32, bs->bl.opt_transfer,
                   bs->bl.request_alignment);
        return;
    }

    ret = blkio_get_int(s->blkio, "max-transfer", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"max-transfer\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.max_transfer = value;
    if ((bs->bl.max_transfer % bs->bl.request_alignment) ||
        (bs->bl.opt_transfer && (bs->bl.max_transfer % bs->bl.opt_transfer))) {
        error_setg(errp, "invalid \"max-transfer\" value %" PRIu32 ", must be "
                   "a multiple of %" PRIu32 " and %" PRIu32 " (if non-zero)",
                   bs->bl.max_transfer, bs->bl.request_alignment,
                   bs->bl.opt_transfer);
        return;
    }

    ret = blkio_get_int(s->blkio, "buf-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"buf-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"buf-alignment\" value %d, must be "
                   "positive", value);
        return;
    }
    bs->bl.min_mem_alignment = value;

    ret = blkio_get_int(s->blkio, "optimal-buf-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get \"optimal-buf-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"optimal-buf-alignment\" value %d, "
                   "must be positive", value);
        return;
    }
    bs->bl.opt_mem_alignment = value;

    ret = blkio_get_int(s->blkio, "max-segments", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"max-segments\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"max-segments\" value %d, must be positive",
                   value);
        return;
    }
    bs->bl.max_iov = value;
}

/*
 * TODO
 * Missing libblkio APIs:
 * - block_status
 * - co_invalidate_cache
 *
 * Out of scope?
 * - create
 * - truncate
 */

#define BLKIO_DRIVER(name, ...) \
    { \
        .format_name = name, \
        .protocol_name = name, \
        .instance_size = sizeof(BDRVBlkioState), \
        .bdrv_file_open = blkio_file_open, \
        .bdrv_close = blkio_close, \
        .bdrv_getlength = blkio_getlength, \
        .bdrv_get_info = blkio_get_info, \
        .bdrv_attach_aio_context = blkio_attach_aio_context, \
        .bdrv_detach_aio_context = blkio_detach_aio_context, \
        .bdrv_co_pdiscard = blkio_co_pdiscard, \
        .bdrv_co_preadv = blkio_co_preadv, \
        .bdrv_co_pwritev = blkio_co_pwritev, \
        .bdrv_co_flush_to_disk = blkio_co_flush, \
        .bdrv_co_pwrite_zeroes = blkio_co_pwrite_zeroes, \
        .bdrv_io_unplug = blkio_io_unplug, \
        .bdrv_refresh_limits = blkio_refresh_limits, \
        .bdrv_register_buf = blkio_register_buf, \
        .bdrv_unregister_buf = blkio_unregister_buf, \
        __VA_ARGS__ \
    }

static BlockDriver bdrv_io_uring = BLKIO_DRIVER(
    DRIVER_IO_URING,
    .bdrv_needs_filename = true,
);

static BlockDriver bdrv_nvme_io_uring = BLKIO_DRIVER(
    DRIVER_NVME_IO_URING,
    .bdrv_needs_filename = true,
);

static BlockDriver bdrv_virtio_blk_vhost_user = BLKIO_DRIVER(
    DRIVER_VIRTIO_BLK_VHOST_USER
);

static BlockDriver bdrv_virtio_blk_vhost_vdpa = BLKIO_DRIVER(
    DRIVER_VIRTIO_BLK_VHOST_VDPA
);

static void bdrv_blkio_init(void)
{
    bdrv_register(&bdrv_io_uring);
    bdrv_register(&bdrv_nvme_io_uring);
    bdrv_register(&bdrv_virtio_blk_vhost_user);
    bdrv_register(&bdrv_virtio_blk_vhost_vdpa);
}

block_init(bdrv_blkio_init);