2 * Block driver for RAW files (posix)
4 * Copyright (c) 2006 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu/osdep.h"
26 #include "qapi/error.h"
27 #include "qemu/cutils.h"
28 #include "qemu/error-report.h"
29 #include "block/block-io.h"
30 #include "block/block_int.h"
31 #include "qemu/module.h"
32 #include "qemu/option.h"
33 #include "qemu/units.h"
34 #include "qemu/memalign.h"
36 #include "block/thread-pool.h"
38 #include "block/raw-aio.h"
39 #include "qapi/qmp/qdict.h"
40 #include "qapi/qmp/qstring.h"
42 #include "scsi/pr-manager.h"
43 #include "scsi/constants.h"
45 #if defined(__APPLE__) && (__MACH__)
46 #include <sys/ioctl.h>
47 #if defined(HAVE_HOST_BLOCK_DEVICE)
49 #include <sys/param.h>
50 #include <sys/mount.h>
51 #include <IOKit/IOKitLib.h>
52 #include <IOKit/IOBSD.h>
53 #include <IOKit/storage/IOMediaBSDClient.h>
54 #include <IOKit/storage/IOMedia.h>
55 #include <IOKit/storage/IOCDMedia.h>
56 //#include <IOKit/storage/IOCDTypes.h>
57 #include <IOKit/storage/IODVDMedia.h>
58 #include <CoreFoundation/CoreFoundation.h>
59 #endif /* defined(HAVE_HOST_BLOCK_DEVICE) */
63 #define _POSIX_PTHREAD_SEMANTICS 1
67 #include <sys/ioctl.h>
68 #include <sys/param.h>
69 #include <sys/syscall.h>
71 #if defined(CONFIG_BLKZONED)
72 #include <linux/blkzoned.h>
74 #include <linux/cdrom.h>
77 #include <linux/hdreg.h>
78 #include <linux/magic.h>
84 #define FS_NOCOW_FL 0x00800000 /* Do not cow file */
87 #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE)
88 #include <linux/falloc.h>
90 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
96 #include <sys/ioctl.h>
97 #include <sys/disklabel.h>
102 #include <sys/ioctl.h>
103 #include <sys/disklabel.h>
104 #include <sys/dkio.h>
105 #include <sys/disk.h>
109 #include <sys/ioctl.h>
110 #include <sys/diskslice.h>
113 /* OS X does not have O_DSYNC */
116 #define O_DSYNC O_SYNC
117 #elif defined(O_FSYNC)
118 #define O_DSYNC O_FSYNC
122 /* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */
124 #define O_DIRECT O_DSYNC
130 #define MAX_BLOCKSIZE 4096
132 /* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes,
133 * leaving a few more bytes for its future use. */
134 #define RAW_LOCK_PERM_BASE 100
135 #define RAW_LOCK_SHARED_BASE 200
137 typedef struct BDRVRawState
{
144 /* The current permissions. */
146 uint64_t shared_perm
;
148 /* The perms bits whose corresponding bytes are already locked in
150 uint64_t locked_perm
;
151 uint64_t locked_shared_perm
;
153 uint64_t aio_max_batch
;
156 int perm_change_flags
;
157 BDRVReopenState
*reopen_state
;
160 bool has_write_zeroes
:1;
161 bool use_linux_aio
:1;
162 bool use_linux_io_uring
:1;
163 int64_t *offset
; /* offset of zone append operation */
164 int page_cache_inconsistent
; /* errno from fdatasync failure */
166 bool needs_alignment
;
167 bool force_alignment
;
169 bool check_cache_dropped
;
171 uint64_t discard_nb_ok
;
172 uint64_t discard_nb_failed
;
173 uint64_t discard_bytes_ok
;
/*
 * State carried across a reopen transaction (prepare/commit/abort).
 * The fields mirror the subset of BDRVRawState that reopen may change;
 * raw_reopen_commit() copies them back into BDRVRawState.
 */
typedef struct BDRVRawReopenState {
    int open_flags;            /* host open(2) flags for the new fd */
    bool drop_cache;           /* invalidate page cache on migration */
    bool check_cache_dropped;  /* verify the cache really was dropped */
} BDRVRawReopenState;
185 static int fd_open(BlockDriverState
*bs
)
187 BDRVRawState
*s
= bs
->opaque
;
189 /* this is just to ensure s->fd is sane (its called by io ops) */
196 static int64_t coroutine_fn
raw_co_getlength(BlockDriverState
*bs
);
198 typedef struct RawPosixAIOData
{
199 BlockDriverState
*bs
;
220 PreallocMode prealloc
;
224 unsigned int *nr_zones
;
225 BlockZoneDescriptor
*zones
;
233 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
234 static int cdrom_reopen(BlockDriverState
*bs
);
238 * Elide EAGAIN and EACCES details when failing to lock, as this
239 * indicates that the specified file region is already locked by
240 * another process, which is considered a common scenario.
242 #define raw_lock_error_setg_errno(errp, err, fmt, ...) \
244 if ((err) == EAGAIN || (err) == EACCES) { \
245 error_setg((errp), (fmt), ## __VA_ARGS__); \
247 error_setg_errno((errp), (err), (fmt), ## __VA_ARGS__); \
251 #if defined(__NetBSD__)
252 static int raw_normalize_devicepath(const char **filename
, Error
**errp
)
254 static char namebuf
[PATH_MAX
];
255 const char *dp
, *fname
;
259 dp
= strrchr(fname
, '/');
260 if (lstat(fname
, &sb
) < 0) {
261 error_setg_file_open(errp
, errno
, fname
);
265 if (!S_ISBLK(sb
.st_mode
)) {
270 snprintf(namebuf
, PATH_MAX
, "r%s", fname
);
272 snprintf(namebuf
, PATH_MAX
, "%.*s/r%s",
273 (int)(dp
- fname
), fname
, dp
+ 1);
276 warn_report("%s is a block device, using %s", fname
, *filename
);
281 static int raw_normalize_devicepath(const char **filename
, Error
**errp
)
288 * Get logical block size via ioctl. On success store it in @sector_size_p.
290 static int probe_logical_blocksize(int fd
, unsigned int *sector_size_p
)
292 unsigned int sector_size
;
293 bool success
= false;
297 static const unsigned long ioctl_list
[] = {
301 #ifdef DKIOCGETBLOCKSIZE
304 #ifdef DIOCGSECTORSIZE
309 /* Try a few ioctls to get the right size */
310 for (i
= 0; i
< (int)ARRAY_SIZE(ioctl_list
); i
++) {
311 if (ioctl(fd
, ioctl_list
[i
], §or_size
) >= 0) {
312 *sector_size_p
= sector_size
;
317 return success
? 0 : -errno
;
/**
 * Get physical block size of @fd.
 * On success, store it in @blk_size and return 0.
 * On failure, return -errno.
 */
static int probe_physical_blocksize(int fd, unsigned int *blk_size)
{
#ifdef BLKPBSZGET
    if (ioctl(fd, BLKPBSZGET, blk_size) < 0) {
        return -errno;
    }
    return 0;
#else
    /* No way to query the physical block size on this platform. */
    return -ENOTSUP;
#endif
}
/*
 * Returns true if no alignment restrictions are necessary even for files
 * opened with O_DIRECT.
 *
 * raw_probe_alignment() probes the required alignment and assume that 1 means
 * the probing failed, so it falls back to a safe default of 4k. This can be
 * avoided if we know that byte alignment is okay for the file.
 */
static bool dio_byte_aligned(int fd)
{
#ifdef __linux__
    struct statfs buf;
    int ret;

    /* NFS client-side caching makes O_DIRECT byte-aligned on Linux. */
    ret = fstatfs(fd, &buf);
    if (ret == 0 && buf.f_type == NFS_SUPER_MAGIC) {
        return true;
    }
#endif
    return false;
}
359 static bool raw_needs_alignment(BlockDriverState
*bs
)
361 BDRVRawState
*s
= bs
->opaque
;
363 if ((bs
->open_flags
& BDRV_O_NOCACHE
) != 0 && !dio_byte_aligned(s
->fd
)) {
367 return s
->force_alignment
;
/* Check if read is allowed with given memory buffer and length.
 *
 * This function is used to check O_DIRECT memory buffer and request alignment.
 */
static bool raw_is_io_aligned(int fd, void *buf, size_t len)
{
    ssize_t ret = pread(fd, buf, len, 0);

    if (ret >= 0) {
        return true;
    }

#ifdef __linux__
    /* The Linux kernel returns EINVAL for misaligned O_DIRECT reads.  Ignore
     * other errors (e.g. real I/O error), which could happen on a failed
     * drive, since we only care about probing alignment.
     */
    if (errno != EINVAL) {
        return true;
    }
#endif

    return false;
}
395 static void raw_probe_alignment(BlockDriverState
*bs
, int fd
, Error
**errp
)
397 BDRVRawState
*s
= bs
->opaque
;
399 size_t max_align
= MAX(MAX_BLOCKSIZE
, qemu_real_host_page_size());
400 size_t alignments
[] = {1, 512, 1024, 2048, 4096};
402 /* For SCSI generic devices the alignment is not really used.
403 With buffered I/O, we don't have any restrictions. */
404 if (bdrv_is_sg(bs
) || !s
->needs_alignment
) {
405 bs
->bl
.request_alignment
= 1;
410 bs
->bl
.request_alignment
= 0;
412 /* Let's try to use the logical blocksize for the alignment. */
413 if (probe_logical_blocksize(fd
, &bs
->bl
.request_alignment
) < 0) {
414 bs
->bl
.request_alignment
= 0;
419 * The XFS ioctl definitions are shipped in extra packages that might
420 * not always be available. Since we just need the XFS_IOC_DIOINFO ioctl
421 * here, we simply use our own definition instead:
428 if (ioctl(fd
, _IOR('X', 30, struct xfs_dioattr
), &da
) >= 0) {
429 bs
->bl
.request_alignment
= da
.d_miniosz
;
430 /* The kernel returns wrong information for d_mem */
431 /* s->buf_align = da.d_mem; */
436 * If we could not get the sizes so far, we can only guess them. First try
437 * to detect request alignment, since it is more likely to succeed. Then
438 * try to detect buf_align, which cannot be detected in some cases (e.g.
439 * Gluster). If buf_align cannot be detected, we fallback to the value of
443 if (!bs
->bl
.request_alignment
) {
446 buf
= qemu_memalign(max_align
, max_align
);
447 for (i
= 0; i
< ARRAY_SIZE(alignments
); i
++) {
448 align
= alignments
[i
];
449 if (raw_is_io_aligned(fd
, buf
, align
)) {
450 /* Fallback to safe value. */
451 bs
->bl
.request_alignment
= (align
!= 1) ? align
: max_align
;
461 buf
= qemu_memalign(max_align
, 2 * max_align
);
462 for (i
= 0; i
< ARRAY_SIZE(alignments
); i
++) {
463 align
= alignments
[i
];
464 if (raw_is_io_aligned(fd
, buf
+ align
, max_align
)) {
465 /* Fallback to request_alignment. */
466 s
->buf_align
= (align
!= 1) ? align
: bs
->bl
.request_alignment
;
473 if (!s
->buf_align
|| !bs
->bl
.request_alignment
) {
474 error_setg(errp
, "Could not find working O_DIRECT alignment");
475 error_append_hint(errp
, "Try cache.direct=off\n");
/*
 * Return 0 if @fd is writable (or writability cannot be determined),
 * -EACCES if the underlying block device is marked read-only, or
 * -errno on stat/ioctl failure.
 */
static int check_hdev_writable(int fd)
{
#if defined(BLKROGET)
    /* Linux block devices can be configured "read-only" using blockdev(8).
     * This is independent of device node permissions and therefore open(2)
     * with O_RDWR succeeds. Actual writes fail with EPERM.
     *
     * bdrv_open() is supposed to fail if the disk is read-only. Explicitly
     * check for read-only block devices so that Linux block devices behave
     * properly.
     */
    struct stat st;
    int readonly = 0;

    if (fstat(fd, &st)) {
        return -errno;
    }

    if (!S_ISBLK(st.st_mode)) {
        return 0;
    }

    if (ioctl(fd, BLKROGET, &readonly) < 0) {
        return -errno;
    }

    if (readonly) {
        return -EACCES;
    }
#endif /* defined(BLKROGET) */
    return 0;
}
512 static void raw_parse_flags(int bdrv_flags
, int *open_flags
, bool has_writers
)
514 bool read_write
= false;
515 assert(open_flags
!= NULL
);
517 *open_flags
|= O_BINARY
;
518 *open_flags
&= ~O_ACCMODE
;
520 if (bdrv_flags
& BDRV_O_AUTO_RDONLY
) {
521 read_write
= has_writers
;
522 } else if (bdrv_flags
& BDRV_O_RDWR
) {
527 *open_flags
|= O_RDWR
;
529 *open_flags
|= O_RDONLY
;
532 /* Use O_DSYNC for write-through caching, no flags for write-back caching,
533 * and O_DIRECT for no caching. */
534 if ((bdrv_flags
& BDRV_O_NOCACHE
)) {
535 *open_flags
|= O_DIRECT
;
539 static void raw_parse_filename(const char *filename
, QDict
*options
,
542 bdrv_parse_filename_strip_prefix(filename
, "file:", options
);
545 static QemuOptsList raw_runtime_opts
= {
547 .head
= QTAILQ_HEAD_INITIALIZER(raw_runtime_opts
.head
),
551 .type
= QEMU_OPT_STRING
,
552 .help
= "File name of the image",
556 .type
= QEMU_OPT_STRING
,
557 .help
= "host AIO implementation (threads, native, io_uring)",
560 .name
= "aio-max-batch",
561 .type
= QEMU_OPT_NUMBER
,
562 .help
= "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
566 .type
= QEMU_OPT_STRING
,
567 .help
= "file locking mode (on/off/auto, default: auto)",
570 .name
= "pr-manager",
571 .type
= QEMU_OPT_STRING
,
572 .help
= "id of persistent reservation manager object (default: none)",
574 #if defined(__linux__)
576 .name
= "drop-cache",
577 .type
= QEMU_OPT_BOOL
,
578 .help
= "invalidate page cache during live migration (default: on)",
582 .name
= "x-check-cache-dropped",
583 .type
= QEMU_OPT_BOOL
,
584 .help
= "check that page cache was dropped on live migration (default: off)"
586 { /* end of list */ }
590 static const char *const mutable_opts
[] = { "x-check-cache-dropped", NULL
};
592 static int raw_open_common(BlockDriverState
*bs
, QDict
*options
,
593 int bdrv_flags
, int open_flags
,
594 bool device
, Error
**errp
)
596 BDRVRawState
*s
= bs
->opaque
;
598 Error
*local_err
= NULL
;
599 const char *filename
= NULL
;
601 BlockdevAioOptions aio
, aio_default
;
606 opts
= qemu_opts_create(&raw_runtime_opts
, NULL
, 0, &error_abort
);
607 if (!qemu_opts_absorb_qdict(opts
, options
, errp
)) {
612 filename
= qemu_opt_get(opts
, "filename");
614 ret
= raw_normalize_devicepath(&filename
, errp
);
619 if (bdrv_flags
& BDRV_O_NATIVE_AIO
) {
620 aio_default
= BLOCKDEV_AIO_OPTIONS_NATIVE
;
621 #ifdef CONFIG_LINUX_IO_URING
622 } else if (bdrv_flags
& BDRV_O_IO_URING
) {
623 aio_default
= BLOCKDEV_AIO_OPTIONS_IO_URING
;
626 aio_default
= BLOCKDEV_AIO_OPTIONS_THREADS
;
629 aio
= qapi_enum_parse(&BlockdevAioOptions_lookup
,
630 qemu_opt_get(opts
, "aio"),
631 aio_default
, &local_err
);
633 error_propagate(errp
, local_err
);
638 s
->use_linux_aio
= (aio
== BLOCKDEV_AIO_OPTIONS_NATIVE
);
639 #ifdef CONFIG_LINUX_IO_URING
640 s
->use_linux_io_uring
= (aio
== BLOCKDEV_AIO_OPTIONS_IO_URING
);
643 s
->aio_max_batch
= qemu_opt_get_number(opts
, "aio-max-batch", 0);
645 locking
= qapi_enum_parse(&OnOffAuto_lookup
,
646 qemu_opt_get(opts
, "locking"),
647 ON_OFF_AUTO_AUTO
, &local_err
);
649 error_propagate(errp
, local_err
);
656 if (!qemu_has_ofd_lock()) {
657 warn_report("File lock requested but OFD locking syscall is "
658 "unavailable, falling back to POSIX file locks");
659 error_printf("Due to the implementation, locks can be lost "
663 case ON_OFF_AUTO_OFF
:
666 case ON_OFF_AUTO_AUTO
:
667 s
->use_lock
= qemu_has_ofd_lock();
673 str
= qemu_opt_get(opts
, "pr-manager");
675 s
->pr_mgr
= pr_manager_lookup(str
, &local_err
);
677 error_propagate(errp
, local_err
);
683 s
->drop_cache
= qemu_opt_get_bool(opts
, "drop-cache", true);
684 s
->check_cache_dropped
= qemu_opt_get_bool(opts
, "x-check-cache-dropped",
687 s
->open_flags
= open_flags
;
688 raw_parse_flags(bdrv_flags
, &s
->open_flags
, false);
691 fd
= qemu_open(filename
, s
->open_flags
, errp
);
692 ret
= fd
< 0 ? -errno
: 0;
702 /* Check s->open_flags rather than bdrv_flags due to auto-read-only */
703 if (s
->open_flags
& O_RDWR
) {
704 ret
= check_hdev_writable(s
->fd
);
706 error_setg_errno(errp
, -ret
, "The device is not writable");
712 s
->shared_perm
= BLK_PERM_ALL
;
714 #ifdef CONFIG_LINUX_AIO
715 /* Currently Linux does AIO only for files opened with O_DIRECT */
716 if (s
->use_linux_aio
) {
717 if (!(s
->open_flags
& O_DIRECT
)) {
718 error_setg(errp
, "aio=native was specified, but it requires "
719 "cache.direct=on, which was not specified.");
723 if (!aio_setup_linux_aio(bdrv_get_aio_context(bs
), errp
)) {
724 error_prepend(errp
, "Unable to use native AIO: ");
729 if (s
->use_linux_aio
) {
730 error_setg(errp
, "aio=native was specified, but is not supported "
735 #endif /* !defined(CONFIG_LINUX_AIO) */
737 #ifdef CONFIG_LINUX_IO_URING
738 if (s
->use_linux_io_uring
) {
739 if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs
), errp
)) {
740 error_prepend(errp
, "Unable to use io_uring: ");
745 if (s
->use_linux_io_uring
) {
746 error_setg(errp
, "aio=io_uring was specified, but is not supported "
751 #endif /* !defined(CONFIG_LINUX_IO_URING) */
753 s
->has_discard
= true;
754 s
->has_write_zeroes
= true;
756 if (fstat(s
->fd
, &st
) < 0) {
758 error_setg_errno(errp
, errno
, "Could not stat file");
763 if (!S_ISREG(st
.st_mode
)) {
764 error_setg(errp
, "'%s' driver requires '%s' to be a regular file",
765 bs
->drv
->format_name
, bs
->filename
);
769 s
->has_fallocate
= true;
772 if (!(S_ISCHR(st
.st_mode
) || S_ISBLK(st
.st_mode
))) {
773 error_setg(errp
, "'%s' driver requires '%s' to be either "
774 "a character or block device",
775 bs
->drv
->format_name
, bs
->filename
);
780 #ifdef CONFIG_BLKZONED
782 * The kernel page cache does not reliably work for writes to SWR zones
783 * of zoned block device because it can not guarantee the order of writes.
785 if ((bs
->bl
.zoned
!= BLK_Z_NONE
) &&
786 (!(s
->open_flags
& O_DIRECT
))) {
787 error_setg(errp
, "The driver supports zoned devices, and it requires "
788 "cache.direct=on, which was not specified.");
789 return -EINVAL
; /* No host kernel page cache */
793 if (S_ISBLK(st
.st_mode
)) {
795 /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do
796 * not rely on the contents of discarded blocks unless using O_DIRECT.
797 * Same for BLKZEROOUT.
799 if (!(bs
->open_flags
& BDRV_O_NOCACHE
)) {
800 s
->has_write_zeroes
= false;
805 if (S_ISCHR(st
.st_mode
)) {
807 * The file is a char device (disk), which on FreeBSD isn't behind
808 * a pager, so force all requests to be aligned. This is needed
809 * so QEMU makes sure all IO operations on the device are aligned
810 * to sector size, or else FreeBSD will reject them with EINVAL.
812 s
->force_alignment
= true;
815 s
->needs_alignment
= raw_needs_alignment(bs
);
817 bs
->supported_zero_flags
= BDRV_REQ_MAY_UNMAP
| BDRV_REQ_NO_FALLBACK
;
818 if (S_ISREG(st
.st_mode
)) {
819 /* When extending regular files, we get zeros from the OS */
820 bs
->supported_truncate_flags
= BDRV_REQ_ZERO_WRITE
;
824 if (ret
< 0 && s
->fd
!= -1) {
827 if (filename
&& (bdrv_flags
& BDRV_O_TEMPORARY
)) {
834 static int raw_open(BlockDriverState
*bs
, QDict
*options
, int flags
,
837 BDRVRawState
*s
= bs
->opaque
;
839 s
->type
= FTYPE_FILE
;
840 return raw_open_common(bs
, options
, flags
, 0, false, errp
);
849 #define PERM_FOREACH(i) \
850 for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++)
852 /* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the
853 * file; if @unlock == true, also unlock the unneeded bytes.
854 * @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
856 static int raw_apply_lock_bytes(BDRVRawState
*s
, int fd
,
857 uint64_t perm_lock_bits
,
858 uint64_t shared_perm_lock_bits
,
859 bool unlock
, Error
**errp
)
863 uint64_t locked_perm
, locked_shared_perm
;
866 locked_perm
= s
->locked_perm
;
867 locked_shared_perm
= s
->locked_shared_perm
;
870 * We don't have the previous bits, just lock/unlock for each of the
874 locked_perm
= BLK_PERM_ALL
;
875 locked_shared_perm
= BLK_PERM_ALL
;
878 locked_shared_perm
= 0;
883 int off
= RAW_LOCK_PERM_BASE
+ i
;
884 uint64_t bit
= (1ULL << i
);
885 if ((perm_lock_bits
& bit
) && !(locked_perm
& bit
)) {
886 ret
= qemu_lock_fd(fd
, off
, 1, false);
888 raw_lock_error_setg_errno(errp
, -ret
, "Failed to lock byte %d",
892 s
->locked_perm
|= bit
;
894 } else if (unlock
&& (locked_perm
& bit
) && !(perm_lock_bits
& bit
)) {
895 ret
= qemu_unlock_fd(fd
, off
, 1);
897 error_setg_errno(errp
, -ret
, "Failed to unlock byte %d", off
);
900 s
->locked_perm
&= ~bit
;
905 int off
= RAW_LOCK_SHARED_BASE
+ i
;
906 uint64_t bit
= (1ULL << i
);
907 if ((shared_perm_lock_bits
& bit
) && !(locked_shared_perm
& bit
)) {
908 ret
= qemu_lock_fd(fd
, off
, 1, false);
910 raw_lock_error_setg_errno(errp
, -ret
, "Failed to lock byte %d",
914 s
->locked_shared_perm
|= bit
;
916 } else if (unlock
&& (locked_shared_perm
& bit
) &&
917 !(shared_perm_lock_bits
& bit
)) {
918 ret
= qemu_unlock_fd(fd
, off
, 1);
920 error_setg_errno(errp
, -ret
, "Failed to unlock byte %d", off
);
923 s
->locked_shared_perm
&= ~bit
;
930 /* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. */
931 static int raw_check_lock_bytes(int fd
, uint64_t perm
, uint64_t shared_perm
,
938 int off
= RAW_LOCK_SHARED_BASE
+ i
;
939 uint64_t p
= 1ULL << i
;
941 ret
= qemu_lock_fd_test(fd
, off
, 1, true);
943 char *perm_name
= bdrv_perm_names(p
);
945 raw_lock_error_setg_errno(errp
, -ret
,
946 "Failed to get \"%s\" lock",
954 int off
= RAW_LOCK_PERM_BASE
+ i
;
955 uint64_t p
= 1ULL << i
;
956 if (!(shared_perm
& p
)) {
957 ret
= qemu_lock_fd_test(fd
, off
, 1, true);
959 char *perm_name
= bdrv_perm_names(p
);
961 raw_lock_error_setg_errno(errp
, -ret
,
962 "Failed to get shared \"%s\" lock",
972 static int raw_handle_perm_lock(BlockDriverState
*bs
,
974 uint64_t new_perm
, uint64_t new_shared
,
977 BDRVRawState
*s
= bs
->opaque
;
979 Error
*local_err
= NULL
;
985 if (bdrv_get_flags(bs
) & BDRV_O_INACTIVE
) {
991 if ((s
->perm
| new_perm
) == s
->perm
&&
992 (s
->shared_perm
& new_shared
) == s
->shared_perm
)
995 * We are going to unlock bytes, it should not fail. If it fail due
996 * to some fs-dependent permission-unrelated reasons (which occurs
997 * sometimes on NFS and leads to abort in bdrv_replace_child) we
998 * can't prevent such errors by any check here. And we ignore them
999 * anyway in ABORT and COMMIT.
1003 ret
= raw_apply_lock_bytes(s
, s
->fd
, s
->perm
| new_perm
,
1004 ~s
->shared_perm
| ~new_shared
,
1007 ret
= raw_check_lock_bytes(s
->fd
, new_perm
, new_shared
, errp
);
1011 error_append_hint(errp
,
1012 "Is another process using the image [%s]?\n",
1015 /* fall through to unlock bytes. */
1017 raw_apply_lock_bytes(s
, s
->fd
, s
->perm
, ~s
->shared_perm
,
1020 /* Theoretically the above call only unlocks bytes and it cannot
1021 * fail. Something weird happened, report it.
1023 warn_report_err(local_err
);
1027 raw_apply_lock_bytes(s
, s
->fd
, new_perm
, ~new_shared
,
1030 /* Theoretically the above call only unlocks bytes and it cannot
1031 * fail. Something weird happened, report it.
1033 warn_report_err(local_err
);
/* Sets a specific flag on the fd's file status flags (F_SETFL).
 * Returns 0 on success, -errno on failure. */
static int fcntl_setfl(int fd, int flag)
{
    int flags;

    flags = fcntl(fd, F_GETFL);
    if (flags == -1) {
        return -errno;
    }
    if (fcntl(fd, F_SETFL, flags | flag) == -1) {
        return -errno;
    }
    return 0;
}
1055 static int raw_reconfigure_getfd(BlockDriverState
*bs
, int flags
,
1056 int *open_flags
, uint64_t perm
, bool force_dup
,
1059 BDRVRawState
*s
= bs
->opaque
;
1062 bool has_writers
= perm
&
1063 (BLK_PERM_WRITE
| BLK_PERM_WRITE_UNCHANGED
| BLK_PERM_RESIZE
);
1064 int fcntl_flags
= O_APPEND
| O_NONBLOCK
;
1066 fcntl_flags
|= O_NOATIME
;
1070 if (s
->type
== FTYPE_CD
) {
1071 *open_flags
|= O_NONBLOCK
;
1074 raw_parse_flags(flags
, open_flags
, has_writers
);
1077 /* Not all operating systems have O_ASYNC, and those that don't
1078 * will not let us track the state into rs->open_flags (typically
1079 * you achieve the same effect with an ioctl, for example I_SETSIG
1080 * on Solaris). But we do not use O_ASYNC, so that's fine.
1082 assert((s
->open_flags
& O_ASYNC
) == 0);
1085 if (!force_dup
&& *open_flags
== s
->open_flags
) {
1086 /* We're lucky, the existing fd is fine */
1090 if ((*open_flags
& ~fcntl_flags
) == (s
->open_flags
& ~fcntl_flags
)) {
1091 /* dup the original fd */
1092 fd
= qemu_dup(s
->fd
);
1094 ret
= fcntl_setfl(fd
, *open_flags
);
1102 /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */
1104 const char *normalized_filename
= bs
->filename
;
1105 ret
= raw_normalize_devicepath(&normalized_filename
, errp
);
1107 fd
= qemu_open(normalized_filename
, *open_flags
, errp
);
1114 if (fd
!= -1 && (*open_flags
& O_RDWR
)) {
1115 ret
= check_hdev_writable(fd
);
1118 error_setg_errno(errp
, -ret
, "The device is not writable");
1126 static int raw_reopen_prepare(BDRVReopenState
*state
,
1127 BlockReopenQueue
*queue
, Error
**errp
)
1130 BDRVRawReopenState
*rs
;
1134 assert(state
!= NULL
);
1135 assert(state
->bs
!= NULL
);
1137 s
= state
->bs
->opaque
;
1139 state
->opaque
= g_new0(BDRVRawReopenState
, 1);
1142 /* Handle options changes */
1143 opts
= qemu_opts_create(&raw_runtime_opts
, NULL
, 0, &error_abort
);
1144 if (!qemu_opts_absorb_qdict(opts
, state
->options
, errp
)) {
1149 rs
->drop_cache
= qemu_opt_get_bool_del(opts
, "drop-cache", true);
1150 rs
->check_cache_dropped
=
1151 qemu_opt_get_bool_del(opts
, "x-check-cache-dropped", false);
1153 /* This driver's reopen function doesn't currently allow changing
1154 * other options, so let's put them back in the original QDict and
1155 * bdrv_reopen_prepare() will detect changes and complain. */
1156 qemu_opts_to_qdict(opts
, state
->options
);
1159 * As part of reopen prepare we also want to create new fd by
1160 * raw_reconfigure_getfd(). But it wants updated "perm", when in
1161 * bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to
1162 * permission update. Happily, permission update is always a part (a seprate
1163 * stage) of bdrv_reopen_multiple() so we can rely on this fact and
1164 * reconfigure fd in raw_check_perm().
1167 s
->reopen_state
= state
;
1171 qemu_opts_del(opts
);
1175 static void raw_reopen_commit(BDRVReopenState
*state
)
1177 BDRVRawReopenState
*rs
= state
->opaque
;
1178 BDRVRawState
*s
= state
->bs
->opaque
;
1180 s
->drop_cache
= rs
->drop_cache
;
1181 s
->check_cache_dropped
= rs
->check_cache_dropped
;
1182 s
->open_flags
= rs
->open_flags
;
1183 g_free(state
->opaque
);
1184 state
->opaque
= NULL
;
1186 assert(s
->reopen_state
== state
);
1187 s
->reopen_state
= NULL
;
1191 static void raw_reopen_abort(BDRVReopenState
*state
)
1193 BDRVRawReopenState
*rs
= state
->opaque
;
1194 BDRVRawState
*s
= state
->bs
->opaque
;
1196 /* nothing to do if NULL, we didn't get far enough */
1201 g_free(state
->opaque
);
1202 state
->opaque
= NULL
;
1204 assert(s
->reopen_state
== state
);
1205 s
->reopen_state
= NULL
;
/*
 * Query the maximum hardware transfer length for the host device behind @fd.
 * Block devices report 512-byte sectors via BLKSECTGET; other (sg) devices
 * report a byte count directly. Returns bytes on success, -errno/-ENOSYS
 * on failure.
 */
static int hdev_get_max_hw_transfer(int fd, struct stat *st)
{
#ifdef BLKSECTGET
    if (S_ISBLK(st->st_mode)) {
        unsigned short max_sectors = 0;
        if (ioctl(fd, BLKSECTGET, &max_sectors) == 0) {
            return max_sectors * 512;
        }
    } else {
        /* SG character devices report the limit in bytes, not sectors. */
        int max_bytes = 0;
        if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) {
            return max_bytes;
        }
    }
    return -errno;
#else
    return -ENOSYS;
#endif
}
1229 * Get a sysfs attribute value as character string.
1232 static int get_sysfs_str_val(struct stat
*st
, const char *attribute
,
1234 g_autofree
char *sysfspath
= NULL
;
1238 if (!S_ISBLK(st
->st_mode
)) {
1242 sysfspath
= g_strdup_printf("/sys/dev/block/%u:%u/queue/%s",
1243 major(st
->st_rdev
), minor(st
->st_rdev
),
1245 ret
= g_file_get_contents(sysfspath
, val
, &len
, NULL
);
1250 /* The file is ended with '\n' */
1253 if (*(p
+ len
- 1) == '\n') {
1254 *(p
+ len
- 1) = '\0';
1260 #if defined(CONFIG_BLKZONED)
1261 static int get_sysfs_zoned_model(struct stat
*st
, BlockZoneModel
*zoned
)
1263 g_autofree
char *val
= NULL
;
1266 ret
= get_sysfs_str_val(st
, "zoned", &val
);
1271 if (strcmp(val
, "host-managed") == 0) {
1273 } else if (strcmp(val
, "host-aware") == 0) {
1275 } else if (strcmp(val
, "none") == 0) {
1276 *zoned
= BLK_Z_NONE
;
1282 #endif /* defined(CONFIG_BLKZONED) */
1285 * Get a sysfs attribute value as a long integer.
1288 static long get_sysfs_long_val(struct stat
*st
, const char *attribute
)
1290 g_autofree
char *str
= NULL
;
1295 ret
= get_sysfs_str_val(st
, attribute
, &str
);
1300 /* The file is ended with '\n', pass 'end' to accept that. */
1301 ret
= qemu_strtol(str
, &end
, 10, &val
);
1302 if (ret
== 0 && end
&& *end
== '\0') {
/*
 * Query the maximum number of scatter/gather segments for the host device:
 * SG_GET_SG_TABLESIZE for SCSI-generic character devices, otherwise the
 * "max_segments" sysfs queue attribute. Returns the limit or negative errno.
 */
static int hdev_get_max_segments(int fd, struct stat *st)
{
#ifdef CONFIG_LINUX
    int ret;

    if (S_ISCHR(st->st_mode)) {
        if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) {
            return ret;
        }
        return -ENOTSUP;
    }
    return get_sysfs_long_val(st, "max_segments");
#else
    return -ENOTSUP;
#endif
}
1326 #if defined(CONFIG_BLKZONED)
1328 * If the reset_all flag is true, then the wps of zone whose state is
1329 * not readonly or offline should be all reset to the start sector.
1330 * Else, take the real wp of the device.
1332 static int get_zones_wp(BlockDriverState
*bs
, int fd
, int64_t offset
,
1333 unsigned int nrz
, bool reset_all
)
1335 struct blk_zone
*blkz
;
1337 uint64_t sector
= offset
>> BDRV_SECTOR_BITS
;
1338 BlockZoneWps
*wps
= bs
->wps
;
1339 unsigned int j
= offset
/ bs
->bl
.zone_size
;
1340 unsigned int n
= 0, i
= 0;
1342 rep_size
= sizeof(struct blk_zone_report
) + nrz
* sizeof(struct blk_zone
);
1343 g_autofree
struct blk_zone_report
*rep
= NULL
;
1345 rep
= g_malloc(rep_size
);
1346 blkz
= (struct blk_zone
*)(rep
+ 1);
1348 memset(rep
, 0, rep_size
);
1349 rep
->sector
= sector
;
1350 rep
->nr_zones
= nrz
- n
;
1353 ret
= ioctl(fd
, BLKREPORTZONE
, rep
);
1354 } while (ret
!= 0 && errno
== EINTR
);
1356 error_report("%d: ioctl BLKREPORTZONE at %" PRId64
" failed %d",
1361 if (!rep
->nr_zones
) {
1365 for (i
= 0; i
< rep
->nr_zones
; ++i
, ++n
, ++j
) {
1367 * The wp tracking cares only about sequential writes required and
1368 * sequential write preferred zones so that the wp can advance to
1369 * the right location.
1370 * Use the most significant bit of the wp location to indicate the
1371 * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
1373 if (blkz
[i
].type
== BLK_ZONE_TYPE_CONVENTIONAL
) {
1374 wps
->wp
[j
] |= 1ULL << 63;
1376 switch(blkz
[i
].cond
) {
1377 case BLK_ZONE_COND_FULL
:
1378 case BLK_ZONE_COND_READONLY
:
1379 /* Zone not writable */
1380 wps
->wp
[j
] = (blkz
[i
].start
+ blkz
[i
].len
) << BDRV_SECTOR_BITS
;
1382 case BLK_ZONE_COND_OFFLINE
:
1383 /* Zone not writable nor readable */
1384 wps
->wp
[j
] = (blkz
[i
].start
) << BDRV_SECTOR_BITS
;
1388 wps
->wp
[j
] = blkz
[i
].start
<< BDRV_SECTOR_BITS
;
1390 wps
->wp
[j
] = blkz
[i
].wp
<< BDRV_SECTOR_BITS
;
1396 sector
= blkz
[i
- 1].start
+ blkz
[i
- 1].len
;
1402 static void update_zones_wp(BlockDriverState
*bs
, int fd
, int64_t offset
,
1405 if (get_zones_wp(bs
, fd
, offset
, nrz
, 0) < 0) {
1406 error_report("update zone wp failed");
1410 static void raw_refresh_zoned_limits(BlockDriverState
*bs
, struct stat
*st
,
1413 BDRVRawState
*s
= bs
->opaque
;
1414 BlockZoneModel zoned
;
1417 bs
->bl
.zoned
= BLK_Z_NONE
;
1419 ret
= get_sysfs_zoned_model(st
, &zoned
);
1420 if (ret
< 0 || zoned
== BLK_Z_NONE
) {
1423 bs
->bl
.zoned
= zoned
;
1425 ret
= get_sysfs_long_val(st
, "max_open_zones");
1427 bs
->bl
.max_open_zones
= ret
;
1430 ret
= get_sysfs_long_val(st
, "max_active_zones");
1432 bs
->bl
.max_active_zones
= ret
;
1436 * The zoned device must at least have zone size and nr_zones fields.
1438 ret
= get_sysfs_long_val(st
, "chunk_sectors");
1440 error_setg_errno(errp
, -ret
, "Unable to read chunk_sectors "
1444 error_setg(errp
, "Read 0 from chunk_sectors sysfs attribute");
1447 bs
->bl
.zone_size
= ret
<< BDRV_SECTOR_BITS
;
1449 ret
= get_sysfs_long_val(st
, "nr_zones");
1451 error_setg_errno(errp
, -ret
, "Unable to read nr_zones "
1455 error_setg(errp
, "Read 0 from nr_zones sysfs attribute");
1458 bs
->bl
.nr_zones
= ret
;
1460 ret
= get_sysfs_long_val(st
, "zone_append_max_bytes");
1462 bs
->bl
.max_append_sectors
= ret
>> BDRV_SECTOR_BITS
;
1465 ret
= get_sysfs_long_val(st
, "physical_block_size");
1467 bs
->bl
.write_granularity
= ret
;
1470 /* The refresh_limits() function can be called multiple times. */
1472 bs
->wps
= g_malloc(sizeof(BlockZoneWps
) +
1473 sizeof(int64_t) * bs
->bl
.nr_zones
);
1474 ret
= get_zones_wp(bs
, s
->fd
, 0, bs
->bl
.nr_zones
, 0);
1476 error_setg_errno(errp
, -ret
, "report wps failed");
1480 qemu_co_mutex_init(&bs
->wps
->colock
);
1482 #else /* !defined(CONFIG_BLKZONED) */
1483 static void raw_refresh_zoned_limits(BlockDriverState
*bs
, struct stat
*st
,
1486 bs
->bl
.zoned
= BLK_Z_NONE
;
1488 #endif /* !defined(CONFIG_BLKZONED) */
1490 static void raw_refresh_limits(BlockDriverState
*bs
, Error
**errp
)
1492 BDRVRawState
*s
= bs
->opaque
;
1495 s
->needs_alignment
= raw_needs_alignment(bs
);
1496 raw_probe_alignment(bs
, s
->fd
, errp
);
1498 bs
->bl
.min_mem_alignment
= s
->buf_align
;
1499 bs
->bl
.opt_mem_alignment
= MAX(s
->buf_align
, qemu_real_host_page_size());
1502 * Maximum transfers are best effort, so it is okay to ignore any
1503 * errors. That said, based on the man page errors in fstat would be
1504 * very much unexpected; the only possible case seems to be ENOMEM.
1506 if (fstat(s
->fd
, &st
)) {
1510 #if defined(__APPLE__) && (__MACH__)
1513 if (!fstatfs(s
->fd
, &buf
)) {
1514 bs
->bl
.opt_transfer
= buf
.f_iosize
;
1515 bs
->bl
.pdiscard_alignment
= buf
.f_bsize
;
1519 if (bdrv_is_sg(bs
) || S_ISBLK(st
.st_mode
)) {
1520 int ret
= hdev_get_max_hw_transfer(s
->fd
, &st
);
1522 if (ret
> 0 && ret
<= BDRV_REQUEST_MAX_BYTES
) {
1523 bs
->bl
.max_hw_transfer
= ret
;
1526 ret
= hdev_get_max_segments(s
->fd
, &st
);
1528 bs
->bl
.max_hw_iov
= ret
;
1532 raw_refresh_zoned_limits(bs
, &st
, errp
);
/*
 * Probe whether @fd refers to an s390 DASD device.  Returns the ioctl
 * result (0 on success) or -1 when BIODASDINFO2 is unavailable.
 */
static int check_for_dasd(int fd)
{
#ifdef BIODASDINFO2
    struct dasd_information2_t info = {0};

    return ioctl(fd, BIODASDINFO2, &info);
#else
    return -1;
#endif
}
1547 * Try to get @bs's logical and physical block size.
1548 * On success, store them in @bsz and return zero.
1549 * On failure, return negative errno.
1551 static int hdev_probe_blocksizes(BlockDriverState
*bs
, BlockSizes
*bsz
)
1553 BDRVRawState
*s
= bs
->opaque
;
1556 /* If DASD or zoned devices, get blocksizes */
1557 if (check_for_dasd(s
->fd
) < 0) {
1558 /* zoned devices are not DASD */
1559 if (bs
->bl
.zoned
== BLK_Z_NONE
) {
1563 ret
= probe_logical_blocksize(s
->fd
, &bsz
->log
);
1567 return probe_physical_blocksize(s
->fd
, &bsz
->phys
);
1571 * Try to get @bs's geometry: cyls, heads, sectors.
1572 * On success, store them in @geo and return 0.
1573 * On failure return -errno.
1574 * (Allows block driver to assign default geometry values that guest sees)
1577 static int hdev_probe_geometry(BlockDriverState
*bs
, HDGeometry
*geo
)
1579 BDRVRawState
*s
= bs
->opaque
;
1580 struct hd_geometry ioctl_geo
= {0};
1582 /* If DASD, get its geometry */
1583 if (check_for_dasd(s
->fd
) < 0) {
1586 if (ioctl(s
->fd
, HDIO_GETGEO
, &ioctl_geo
) < 0) {
1589 /* HDIO_GETGEO may return success even though geo contains zeros
1590 (e.g. certain multipath setups) */
1591 if (!ioctl_geo
.heads
|| !ioctl_geo
.sectors
|| !ioctl_geo
.cylinders
) {
1594 /* Do not return a geometry for partition */
1595 if (ioctl_geo
.start
!= 0) {
1598 geo
->heads
= ioctl_geo
.heads
;
1599 geo
->sectors
= ioctl_geo
.sectors
;
1600 geo
->cylinders
= ioctl_geo
.cylinders
;
1604 #else /* __linux__ */
1605 static int hdev_probe_geometry(BlockDriverState
*bs
, HDGeometry
*geo
)
1611 #if defined(__linux__)
1612 static int handle_aiocb_ioctl(void *opaque
)
1614 RawPosixAIOData
*aiocb
= opaque
;
1617 ret
= RETRY_ON_EINTR(
1618 ioctl(aiocb
->aio_fildes
, aiocb
->ioctl
.cmd
, aiocb
->ioctl
.buf
)
1628 static int handle_aiocb_flush(void *opaque
)
1630 RawPosixAIOData
*aiocb
= opaque
;
1631 BDRVRawState
*s
= aiocb
->bs
->opaque
;
1634 if (s
->page_cache_inconsistent
) {
1635 return -s
->page_cache_inconsistent
;
1638 ret
= qemu_fdatasync(aiocb
->aio_fildes
);
1640 trace_file_flush_fdatasync_failed(errno
);
1642 /* There is no clear definition of the semantics of a failing fsync(),
1643 * so we may have to assume the worst. The sad truth is that this
1644 * assumption is correct for Linux. Some pages are now probably marked
1645 * clean in the page cache even though they are inconsistent with the
1646 * on-disk contents. The next fdatasync() call would succeed, but no
1647 * further writeback attempt will be made. We can't get back to a state
1648 * in which we know what is on disk (we would have to rewrite
1649 * everything that was touched since the last fdatasync() at least), so
1650 * make bdrv_flush() fail permanently. Given that the behaviour isn't
1651 * really defined, I have little hope that other OSes are doing better.
1653 * Obviously, this doesn't affect O_DIRECT, which bypasses the page
1655 if ((s
->open_flags
& O_DIRECT
) == 0) {
1656 s
->page_cache_inconsistent
= errno
;
1663 #ifdef CONFIG_PREADV
1665 static bool preadv_present
= true;
1668 qemu_preadv(int fd
, const struct iovec
*iov
, int nr_iov
, off_t offset
)
1670 return preadv(fd
, iov
, nr_iov
, offset
);
1674 qemu_pwritev(int fd
, const struct iovec
*iov
, int nr_iov
, off_t offset
)
1676 return pwritev(fd
, iov
, nr_iov
, offset
);
1681 static bool preadv_present
= false;
1684 qemu_preadv(int fd
, const struct iovec
*iov
, int nr_iov
, off_t offset
)
1690 qemu_pwritev(int fd
, const struct iovec
*iov
, int nr_iov
, off_t offset
)
1697 static ssize_t
handle_aiocb_rw_vector(RawPosixAIOData
*aiocb
)
1701 len
= RETRY_ON_EINTR(
1702 (aiocb
->aio_type
& (QEMU_AIO_WRITE
| QEMU_AIO_ZONE_APPEND
)) ?
1703 qemu_pwritev(aiocb
->aio_fildes
,
1706 aiocb
->aio_offset
) :
1707 qemu_preadv(aiocb
->aio_fildes
,
1720 * Read/writes the data to/from a given linear buffer.
1722 * Returns the number of bytes handles or -errno in case of an error. Short
1723 * reads are only returned if the end of the file is reached.
1725 static ssize_t
handle_aiocb_rw_linear(RawPosixAIOData
*aiocb
, char *buf
)
1730 while (offset
< aiocb
->aio_nbytes
) {
1731 if (aiocb
->aio_type
& (QEMU_AIO_WRITE
| QEMU_AIO_ZONE_APPEND
)) {
1732 len
= pwrite(aiocb
->aio_fildes
,
1733 (const char *)buf
+ offset
,
1734 aiocb
->aio_nbytes
- offset
,
1735 aiocb
->aio_offset
+ offset
);
1737 len
= pread(aiocb
->aio_fildes
,
1739 aiocb
->aio_nbytes
- offset
,
1740 aiocb
->aio_offset
+ offset
);
1742 if (len
== -1 && errno
== EINTR
) {
1744 } else if (len
== -1 && errno
== EINVAL
&&
1745 (aiocb
->bs
->open_flags
& BDRV_O_NOCACHE
) &&
1746 !(aiocb
->aio_type
& QEMU_AIO_WRITE
) &&
1748 /* O_DIRECT pread() may fail with EINVAL when offset is unaligned
1749 * after a short read. Assume that O_DIRECT short reads only occur
1750 * at EOF. Therefore this is a short read, not an I/O error.
1753 } else if (len
== -1) {
1756 } else if (len
== 0) {
1765 static int handle_aiocb_rw(void *opaque
)
1767 RawPosixAIOData
*aiocb
= opaque
;
1771 if (!(aiocb
->aio_type
& QEMU_AIO_MISALIGNED
)) {
1773 * If there is just a single buffer, and it is properly aligned
1774 * we can just use plain pread/pwrite without any problems.
1776 if (aiocb
->io
.niov
== 1) {
1777 nbytes
= handle_aiocb_rw_linear(aiocb
, aiocb
->io
.iov
->iov_base
);
1781 * We have more than one iovec, and all are properly aligned.
1783 * Try preadv/pwritev first and fall back to linearizing the
1784 * buffer if it's not supported.
1786 if (preadv_present
) {
1787 nbytes
= handle_aiocb_rw_vector(aiocb
);
1788 if (nbytes
== aiocb
->aio_nbytes
||
1789 (nbytes
< 0 && nbytes
!= -ENOSYS
)) {
1792 preadv_present
= false;
1796 * XXX(hch): short read/write. no easy way to handle the reminder
1797 * using these interfaces. For now retry using plain
1803 * Ok, we have to do it the hard way, copy all segments into
1804 * a single aligned buffer.
1806 buf
= qemu_try_blockalign(aiocb
->bs
, aiocb
->aio_nbytes
);
1812 if (aiocb
->aio_type
& QEMU_AIO_WRITE
) {
1816 for (i
= 0; i
< aiocb
->io
.niov
; ++i
) {
1817 memcpy(p
, aiocb
->io
.iov
[i
].iov_base
, aiocb
->io
.iov
[i
].iov_len
);
1818 p
+= aiocb
->io
.iov
[i
].iov_len
;
1820 assert(p
- buf
== aiocb
->aio_nbytes
);
1823 nbytes
= handle_aiocb_rw_linear(aiocb
, buf
);
1824 if (!(aiocb
->aio_type
& (QEMU_AIO_WRITE
| QEMU_AIO_ZONE_APPEND
))) {
1826 size_t count
= aiocb
->aio_nbytes
, copy
;
1829 for (i
= 0; i
< aiocb
->io
.niov
&& count
; ++i
) {
1831 if (copy
> aiocb
->io
.iov
[i
].iov_len
) {
1832 copy
= aiocb
->io
.iov
[i
].iov_len
;
1834 memcpy(aiocb
->io
.iov
[i
].iov_base
, p
, copy
);
1835 assert(count
>= copy
);
1844 if (nbytes
== aiocb
->aio_nbytes
) {
1846 } else if (nbytes
>= 0 && nbytes
< aiocb
->aio_nbytes
) {
1847 if (aiocb
->aio_type
& QEMU_AIO_WRITE
) {
1850 iov_memset(aiocb
->io
.iov
, aiocb
->io
.niov
, nbytes
,
1851 0, aiocb
->aio_nbytes
- nbytes
);
1860 #if defined(CONFIG_FALLOCATE) || defined(BLKZEROOUT) || defined(BLKDISCARD)
/*
 * Collapse the various "operation not supported" errno values into
 * -ENOTSUP so callers can test a single code; other errors pass through.
 */
static int translate_err(int err)
{
    if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
        err == -ENOTTY) {
        err = -ENOTSUP;
    }
    return err;
}
1871 #ifdef CONFIG_FALLOCATE
/* Call fallocate(), retrying on EINTR; map failures via translate_err(). */
static int do_fallocate(int fd, int mode, off_t offset, off_t len)
{
    do {
        if (fallocate(fd, mode, offset, len) == 0) {
            return 0;
        }
    } while (errno == EINTR);
    return translate_err(-errno);
}
1883 static ssize_t
handle_aiocb_write_zeroes_block(RawPosixAIOData
*aiocb
)
1886 BDRVRawState
*s
= aiocb
->bs
->opaque
;
1888 if (!s
->has_write_zeroes
) {
1893 /* The BLKZEROOUT implementation in the kernel doesn't set
1894 * BLKDEV_ZERO_NOFALLBACK, so we can't call this if we have to avoid slow
1896 if (!(aiocb
->aio_type
& QEMU_AIO_NO_FALLBACK
)) {
1898 uint64_t range
[2] = { aiocb
->aio_offset
, aiocb
->aio_nbytes
};
1899 if (ioctl(aiocb
->aio_fildes
, BLKZEROOUT
, range
) == 0) {
1902 } while (errno
== EINTR
);
1904 ret
= translate_err(-errno
);
1905 if (ret
== -ENOTSUP
) {
1906 s
->has_write_zeroes
= false;
1914 static int handle_aiocb_write_zeroes(void *opaque
)
1916 RawPosixAIOData
*aiocb
= opaque
;
1917 #ifdef CONFIG_FALLOCATE
1918 BDRVRawState
*s
= aiocb
->bs
->opaque
;
1922 if (aiocb
->aio_type
& QEMU_AIO_BLKDEV
) {
1923 return handle_aiocb_write_zeroes_block(aiocb
);
1926 #ifdef CONFIG_FALLOCATE_ZERO_RANGE
1927 if (s
->has_write_zeroes
) {
1928 int ret
= do_fallocate(s
->fd
, FALLOC_FL_ZERO_RANGE
,
1929 aiocb
->aio_offset
, aiocb
->aio_nbytes
);
1930 if (ret
== -ENOTSUP
) {
1931 s
->has_write_zeroes
= false;
1932 } else if (ret
== 0 || ret
!= -EINVAL
) {
1936 * Note: Some file systems do not like unaligned byte ranges, and
1937 * return EINVAL in such a case, though they should not do it according
1938 * to the man-page of fallocate(). Thus we simply ignore this return
1939 * value and try the other fallbacks instead.
1944 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
1945 if (s
->has_discard
&& s
->has_fallocate
) {
1946 int ret
= do_fallocate(s
->fd
,
1947 FALLOC_FL_PUNCH_HOLE
| FALLOC_FL_KEEP_SIZE
,
1948 aiocb
->aio_offset
, aiocb
->aio_nbytes
);
1950 ret
= do_fallocate(s
->fd
, 0, aiocb
->aio_offset
, aiocb
->aio_nbytes
);
1951 if (ret
== 0 || ret
!= -ENOTSUP
) {
1954 s
->has_fallocate
= false;
1955 } else if (ret
== -EINVAL
) {
1957 * Some file systems like older versions of GPFS do not like un-
1958 * aligned byte ranges, and return EINVAL in such a case, though
1959 * they should not do it according to the man-page of fallocate().
1960 * Warn about the bad filesystem and try the final fallback instead.
1962 warn_report_once("Your file system is misbehaving: "
1963 "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. "
1964 "Please report this bug to your file system "
1966 } else if (ret
!= -ENOTSUP
) {
1969 s
->has_discard
= false;
1974 #ifdef CONFIG_FALLOCATE
1975 /* Last resort: we are trying to extend the file with zeroed data. This
1976 * can be done via fallocate(fd, 0) */
1977 len
= raw_co_getlength(aiocb
->bs
);
1978 if (s
->has_fallocate
&& len
>= 0 && aiocb
->aio_offset
>= len
) {
1979 int ret
= do_fallocate(s
->fd
, 0, aiocb
->aio_offset
, aiocb
->aio_nbytes
);
1980 if (ret
== 0 || ret
!= -ENOTSUP
) {
1983 s
->has_fallocate
= false;
1990 static int handle_aiocb_write_zeroes_unmap(void *opaque
)
1992 RawPosixAIOData
*aiocb
= opaque
;
1993 BDRVRawState
*s G_GNUC_UNUSED
= aiocb
->bs
->opaque
;
1995 /* First try to write zeros and unmap at the same time */
1997 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
1998 int ret
= do_fallocate(s
->fd
, FALLOC_FL_PUNCH_HOLE
| FALLOC_FL_KEEP_SIZE
,
1999 aiocb
->aio_offset
, aiocb
->aio_nbytes
);
2010 /* If we couldn't manage to unmap while guaranteed that the area reads as
2011 * all-zero afterwards, just write zeroes without unmapping */
2012 return handle_aiocb_write_zeroes(aiocb
);
2015 #ifndef HAVE_COPY_FILE_RANGE
2016 static off_t
copy_file_range(int in_fd
, off_t
*in_off
, int out_fd
,
2017 off_t
*out_off
, size_t len
, unsigned int flags
)
2019 #ifdef __NR_copy_file_range
2020 return syscall(__NR_copy_file_range
, in_fd
, in_off
, out_fd
,
2021 out_off
, len
, flags
);
2030 * parse_zone - Fill a zone descriptor
2032 #if defined(CONFIG_BLKZONED)
2033 static inline int parse_zone(struct BlockZoneDescriptor
*zone
,
2034 const struct blk_zone
*blkz
) {
2035 zone
->start
= blkz
->start
<< BDRV_SECTOR_BITS
;
2036 zone
->length
= blkz
->len
<< BDRV_SECTOR_BITS
;
2037 zone
->wp
= blkz
->wp
<< BDRV_SECTOR_BITS
;
2039 #ifdef HAVE_BLK_ZONE_REP_CAPACITY
2040 zone
->cap
= blkz
->capacity
<< BDRV_SECTOR_BITS
;
2042 zone
->cap
= blkz
->len
<< BDRV_SECTOR_BITS
;
2045 switch (blkz
->type
) {
2046 case BLK_ZONE_TYPE_SEQWRITE_REQ
:
2047 zone
->type
= BLK_ZT_SWR
;
2049 case BLK_ZONE_TYPE_SEQWRITE_PREF
:
2050 zone
->type
= BLK_ZT_SWP
;
2052 case BLK_ZONE_TYPE_CONVENTIONAL
:
2053 zone
->type
= BLK_ZT_CONV
;
2056 error_report("Unsupported zone type: 0x%x", blkz
->type
);
2060 switch (blkz
->cond
) {
2061 case BLK_ZONE_COND_NOT_WP
:
2062 zone
->state
= BLK_ZS_NOT_WP
;
2064 case BLK_ZONE_COND_EMPTY
:
2065 zone
->state
= BLK_ZS_EMPTY
;
2067 case BLK_ZONE_COND_IMP_OPEN
:
2068 zone
->state
= BLK_ZS_IOPEN
;
2070 case BLK_ZONE_COND_EXP_OPEN
:
2071 zone
->state
= BLK_ZS_EOPEN
;
2073 case BLK_ZONE_COND_CLOSED
:
2074 zone
->state
= BLK_ZS_CLOSED
;
2076 case BLK_ZONE_COND_READONLY
:
2077 zone
->state
= BLK_ZS_RDONLY
;
2079 case BLK_ZONE_COND_FULL
:
2080 zone
->state
= BLK_ZS_FULL
;
2082 case BLK_ZONE_COND_OFFLINE
:
2083 zone
->state
= BLK_ZS_OFFLINE
;
2086 error_report("Unsupported zone state: 0x%x", blkz
->cond
);
2093 #if defined(CONFIG_BLKZONED)
2094 static int handle_aiocb_zone_report(void *opaque
)
2096 RawPosixAIOData
*aiocb
= opaque
;
2097 int fd
= aiocb
->aio_fildes
;
2098 unsigned int *nr_zones
= aiocb
->zone_report
.nr_zones
;
2099 BlockZoneDescriptor
*zones
= aiocb
->zone_report
.zones
;
2100 /* zoned block devices use 512-byte sectors */
2101 uint64_t sector
= aiocb
->aio_offset
/ 512;
2103 struct blk_zone
*blkz
;
2107 unsigned int n
= 0, i
= 0;
2110 rep_size
= sizeof(struct blk_zone_report
) + nrz
* sizeof(struct blk_zone
);
2111 g_autofree
struct blk_zone_report
*rep
= NULL
;
2112 rep
= g_malloc(rep_size
);
2114 blkz
= (struct blk_zone
*)(rep
+ 1);
2116 memset(rep
, 0, rep_size
);
2117 rep
->sector
= sector
;
2118 rep
->nr_zones
= nrz
- n
;
2121 ret
= ioctl(fd
, BLKREPORTZONE
, rep
);
2122 } while (ret
!= 0 && errno
== EINTR
);
2124 error_report("%d: ioctl BLKREPORTZONE at %" PRId64
" failed %d",
2129 if (!rep
->nr_zones
) {
2133 for (i
= 0; i
< rep
->nr_zones
; i
++, n
++) {
2134 ret
= parse_zone(&zones
[n
], &blkz
[i
]);
2139 /* The next report should start after the last zone reported */
2140 sector
= blkz
[i
].start
+ blkz
[i
].len
;
2149 #if defined(CONFIG_BLKZONED)
2150 static int handle_aiocb_zone_mgmt(void *opaque
)
2152 RawPosixAIOData
*aiocb
= opaque
;
2153 int fd
= aiocb
->aio_fildes
;
2154 uint64_t sector
= aiocb
->aio_offset
/ 512;
2155 int64_t nr_sectors
= aiocb
->aio_nbytes
/ 512;
2156 struct blk_zone_range range
;
2159 /* Execute the operation */
2160 range
.sector
= sector
;
2161 range
.nr_sectors
= nr_sectors
;
2163 ret
= ioctl(fd
, aiocb
->zone_mgmt
.op
, &range
);
2164 } while (ret
!= 0 && errno
== EINTR
);
2166 return ret
< 0 ? -errno
: ret
;
2170 static int handle_aiocb_copy_range(void *opaque
)
2172 RawPosixAIOData
*aiocb
= opaque
;
2173 uint64_t bytes
= aiocb
->aio_nbytes
;
2174 off_t in_off
= aiocb
->aio_offset
;
2175 off_t out_off
= aiocb
->copy_range
.aio_offset2
;
2178 ssize_t ret
= copy_file_range(aiocb
->aio_fildes
, &in_off
,
2179 aiocb
->copy_range
.aio_fd2
, &out_off
,
2181 trace_file_copy_file_range(aiocb
->bs
, aiocb
->aio_fildes
, in_off
,
2182 aiocb
->copy_range
.aio_fd2
, out_off
, bytes
,
2185 /* No progress (e.g. when beyond EOF), let the caller fall back to
2204 static int handle_aiocb_discard(void *opaque
)
2206 RawPosixAIOData
*aiocb
= opaque
;
2208 BDRVRawState
*s
= aiocb
->bs
->opaque
;
2210 if (!s
->has_discard
) {
2214 if (aiocb
->aio_type
& QEMU_AIO_BLKDEV
) {
2217 uint64_t range
[2] = { aiocb
->aio_offset
, aiocb
->aio_nbytes
};
2218 if (ioctl(aiocb
->aio_fildes
, BLKDISCARD
, range
) == 0) {
2221 } while (errno
== EINTR
);
2223 ret
= translate_err(-errno
);
2226 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
2227 ret
= do_fallocate(s
->fd
, FALLOC_FL_PUNCH_HOLE
| FALLOC_FL_KEEP_SIZE
,
2228 aiocb
->aio_offset
, aiocb
->aio_nbytes
);
2229 ret
= translate_err(ret
);
2230 #elif defined(__APPLE__) && (__MACH__)
2231 fpunchhole_t fpunchhole
;
2232 fpunchhole
.fp_flags
= 0;
2233 fpunchhole
.reserved
= 0;
2234 fpunchhole
.fp_offset
= aiocb
->aio_offset
;
2235 fpunchhole
.fp_length
= aiocb
->aio_nbytes
;
2236 if (fcntl(s
->fd
, F_PUNCHHOLE
, &fpunchhole
) == -1) {
2237 ret
= errno
== ENODEV
? -ENOTSUP
: -errno
;
2244 if (ret
== -ENOTSUP
) {
2245 s
->has_discard
= false;
2251 * Help alignment probing by allocating the first block.
2253 * When reading with direct I/O from unallocated area on Gluster backed by XFS,
2254 * reading succeeds regardless of request length. In this case we fallback to
2255 * safe alignment which is not optimal. Allocating the first block avoids this
2258 * fd may be opened with O_DIRECT, but we don't know the buffer alignment or
2259 * request alignment, so we use safe values.
2261 * Returns: 0 on success, -errno on failure. Since this is an optimization,
2262 * caller may ignore failures.
2264 static int allocate_first_block(int fd
, size_t max_size
)
2266 size_t write_size
= (max_size
< MAX_BLOCKSIZE
)
2269 size_t max_align
= MAX(MAX_BLOCKSIZE
, qemu_real_host_page_size());
2274 buf
= qemu_memalign(max_align
, write_size
);
2275 memset(buf
, 0, write_size
);
2277 n
= RETRY_ON_EINTR(pwrite(fd
, buf
, write_size
, 0));
2279 ret
= (n
== -1) ? -errno
: 0;
2285 static int handle_aiocb_truncate(void *opaque
)
2287 RawPosixAIOData
*aiocb
= opaque
;
2289 int64_t current_length
= 0;
2292 int fd
= aiocb
->aio_fildes
;
2293 int64_t offset
= aiocb
->aio_offset
;
2294 PreallocMode prealloc
= aiocb
->truncate
.prealloc
;
2295 Error
**errp
= aiocb
->truncate
.errp
;
2297 if (fstat(fd
, &st
) < 0) {
2299 error_setg_errno(errp
, -result
, "Could not stat file");
2303 current_length
= st
.st_size
;
2304 if (current_length
> offset
&& prealloc
!= PREALLOC_MODE_OFF
) {
2305 error_setg(errp
, "Cannot use preallocation for shrinking files");
2310 #ifdef CONFIG_POSIX_FALLOCATE
2311 case PREALLOC_MODE_FALLOC
:
2313 * Truncating before posix_fallocate() makes it about twice slower on
2314 * file systems that do not support fallocate(), trying to check if a
2315 * block is allocated before allocating it, so don't do that here.
2317 if (offset
!= current_length
) {
2318 result
= -posix_fallocate(fd
, current_length
,
2319 offset
- current_length
);
2321 /* posix_fallocate() doesn't set errno. */
2322 error_setg_errno(errp
, -result
,
2323 "Could not preallocate new data");
2324 } else if (current_length
== 0) {
2326 * posix_fallocate() uses fallocate() if the filesystem
2327 * supports it, or fallback to manually writing zeroes. If
2328 * fallocate() was used, unaligned reads from the fallocated
2329 * area in raw_probe_alignment() will succeed, hence we need to
2330 * allocate the first block.
2332 * Optimize future alignment probing; ignore failures.
2334 allocate_first_block(fd
, offset
);
2341 case PREALLOC_MODE_FULL
:
2343 int64_t num
= 0, left
= offset
- current_length
;
2347 * Knowing the final size from the beginning could allow the file
2348 * system driver to do less allocations and possibly avoid
2349 * fragmentation of the file.
2351 if (ftruncate(fd
, offset
) != 0) {
2353 error_setg_errno(errp
, -result
, "Could not resize file");
2357 buf
= g_malloc0(65536);
2359 seek_result
= lseek(fd
, current_length
, SEEK_SET
);
2360 if (seek_result
< 0) {
2362 error_setg_errno(errp
, -result
,
2363 "Failed to seek to the old end of file");
2368 num
= MIN(left
, 65536);
2369 result
= write(fd
, buf
, num
);
2371 if (errno
== EINTR
) {
2375 error_setg_errno(errp
, -result
,
2376 "Could not write zeros for preallocation");
2385 error_setg_errno(errp
, -result
,
2386 "Could not flush file to disk");
2392 case PREALLOC_MODE_OFF
:
2393 if (ftruncate(fd
, offset
) != 0) {
2395 error_setg_errno(errp
, -result
, "Could not resize file");
2396 } else if (current_length
== 0 && offset
> current_length
) {
2397 /* Optimize future alignment probing; ignore failures. */
2398 allocate_first_block(fd
, offset
);
2403 error_setg(errp
, "Unsupported preallocation mode: %s",
2404 PreallocMode_str(prealloc
));
2410 if (ftruncate(fd
, current_length
) < 0) {
2411 error_report("Failed to restore old file length: %s",
2420 static int coroutine_fn
raw_thread_pool_submit(ThreadPoolFunc func
, void *arg
)
2422 return thread_pool_submit_co(func
, arg
);
2426 * Check if all memory in this vector is sector aligned.
2428 static bool bdrv_qiov_is_aligned(BlockDriverState
*bs
, QEMUIOVector
*qiov
)
2431 size_t alignment
= bdrv_min_mem_align(bs
);
2432 size_t len
= bs
->bl
.request_alignment
;
2435 for (i
= 0; i
< qiov
->niov
; i
++) {
2436 if ((uintptr_t) qiov
->iov
[i
].iov_base
% alignment
) {
2439 if (qiov
->iov
[i
].iov_len
% len
) {
2447 static int coroutine_fn
raw_co_prw(BlockDriverState
*bs
, uint64_t offset
,
2448 uint64_t bytes
, QEMUIOVector
*qiov
, int type
)
2450 BDRVRawState
*s
= bs
->opaque
;
2451 RawPosixAIOData acb
;
2454 if (fd_open(bs
) < 0)
2456 #if defined(CONFIG_BLKZONED)
2457 if ((type
& (QEMU_AIO_WRITE
| QEMU_AIO_ZONE_APPEND
)) && bs
->wps
) {
2458 qemu_co_mutex_lock(&bs
->wps
->colock
);
2459 if (type
& QEMU_AIO_ZONE_APPEND
&& bs
->bl
.zone_size
) {
2460 int index
= offset
/ bs
->bl
.zone_size
;
2461 offset
= bs
->wps
->wp
[index
];
2467 * When using O_DIRECT, the request must be aligned to be able to use
2468 * either libaio or io_uring interface. If not fail back to regular thread
2469 * pool read/write code which emulates this for us if we
2470 * set QEMU_AIO_MISALIGNED.
2472 if (s
->needs_alignment
&& !bdrv_qiov_is_aligned(bs
, qiov
)) {
2473 type
|= QEMU_AIO_MISALIGNED
;
2474 #ifdef CONFIG_LINUX_IO_URING
2475 } else if (s
->use_linux_io_uring
) {
2476 assert(qiov
->size
== bytes
);
2477 ret
= luring_co_submit(bs
, s
->fd
, offset
, qiov
, type
);
2480 #ifdef CONFIG_LINUX_AIO
2481 } else if (s
->use_linux_aio
) {
2482 assert(qiov
->size
== bytes
);
2483 ret
= laio_co_submit(s
->fd
, offset
, qiov
, type
,
2489 acb
= (RawPosixAIOData
) {
2491 .aio_fildes
= s
->fd
,
2493 .aio_offset
= offset
,
2494 .aio_nbytes
= bytes
,
2501 assert(qiov
->size
== bytes
);
2502 ret
= raw_thread_pool_submit(handle_aiocb_rw
, &acb
);
2503 goto out
; /* Avoid the compiler err of unused label */
2506 #if defined(CONFIG_BLKZONED)
2508 BlockZoneWps
*wps
= bs
->wps
;
2510 if ((type
& (QEMU_AIO_WRITE
| QEMU_AIO_ZONE_APPEND
))
2511 && wps
&& bs
->bl
.zone_size
) {
2512 uint64_t *wp
= &wps
->wp
[offset
/ bs
->bl
.zone_size
];
2513 if (!BDRV_ZT_IS_CONV(*wp
)) {
2514 if (type
& QEMU_AIO_ZONE_APPEND
) {
2516 trace_zbd_zone_append_complete(bs
, *s
->offset
2517 >> BDRV_SECTOR_BITS
);
2519 /* Advance the wp if needed */
2520 if (offset
+ bytes
> *wp
) {
2521 *wp
= offset
+ bytes
;
2526 if (type
& (QEMU_AIO_WRITE
| QEMU_AIO_ZONE_APPEND
)) {
2527 update_zones_wp(bs
, s
->fd
, 0, 1);
2531 if ((type
& (QEMU_AIO_WRITE
| QEMU_AIO_ZONE_APPEND
)) && wps
) {
2532 qemu_co_mutex_unlock(&wps
->colock
);
2539 static int coroutine_fn
raw_co_preadv(BlockDriverState
*bs
, int64_t offset
,
2540 int64_t bytes
, QEMUIOVector
*qiov
,
2541 BdrvRequestFlags flags
)
2543 return raw_co_prw(bs
, offset
, bytes
, qiov
, QEMU_AIO_READ
);
2546 static int coroutine_fn
raw_co_pwritev(BlockDriverState
*bs
, int64_t offset
,
2547 int64_t bytes
, QEMUIOVector
*qiov
,
2548 BdrvRequestFlags flags
)
2550 return raw_co_prw(bs
, offset
, bytes
, qiov
, QEMU_AIO_WRITE
);
2553 static void coroutine_fn
raw_co_io_plug(BlockDriverState
*bs
)
2555 BDRVRawState
__attribute__((unused
)) *s
= bs
->opaque
;
2556 #ifdef CONFIG_LINUX_AIO
2557 if (s
->use_linux_aio
) {
2561 #ifdef CONFIG_LINUX_IO_URING
2562 if (s
->use_linux_io_uring
) {
2568 static void coroutine_fn
raw_co_io_unplug(BlockDriverState
*bs
)
2570 BDRVRawState
__attribute__((unused
)) *s
= bs
->opaque
;
2571 #ifdef CONFIG_LINUX_AIO
2572 if (s
->use_linux_aio
) {
2573 laio_io_unplug(s
->aio_max_batch
);
2576 #ifdef CONFIG_LINUX_IO_URING
2577 if (s
->use_linux_io_uring
) {
2583 static int coroutine_fn
raw_co_flush_to_disk(BlockDriverState
*bs
)
2585 BDRVRawState
*s
= bs
->opaque
;
2586 RawPosixAIOData acb
;
2594 acb
= (RawPosixAIOData
) {
2596 .aio_fildes
= s
->fd
,
2597 .aio_type
= QEMU_AIO_FLUSH
,
2600 #ifdef CONFIG_LINUX_IO_URING
2601 if (s
->use_linux_io_uring
) {
2602 return luring_co_submit(bs
, s
->fd
, 0, NULL
, QEMU_AIO_FLUSH
);
2605 return raw_thread_pool_submit(handle_aiocb_flush
, &acb
);
2608 static void raw_aio_attach_aio_context(BlockDriverState
*bs
,
2609 AioContext
*new_context
)
2611 BDRVRawState
__attribute__((unused
)) *s
= bs
->opaque
;
2612 #ifdef CONFIG_LINUX_AIO
2613 if (s
->use_linux_aio
) {
2614 Error
*local_err
= NULL
;
2615 if (!aio_setup_linux_aio(new_context
, &local_err
)) {
2616 error_reportf_err(local_err
, "Unable to use native AIO, "
2617 "falling back to thread pool: ");
2618 s
->use_linux_aio
= false;
2622 #ifdef CONFIG_LINUX_IO_URING
2623 if (s
->use_linux_io_uring
) {
2624 Error
*local_err
= NULL
;
2625 if (!aio_setup_linux_io_uring(new_context
, &local_err
)) {
2626 error_reportf_err(local_err
, "Unable to use linux io_uring, "
2627 "falling back to thread pool: ");
2628 s
->use_linux_io_uring
= false;
2634 static void raw_close(BlockDriverState
*bs
)
2636 BDRVRawState
*s
= bs
->opaque
;
2639 #if defined(CONFIG_BLKZONED)
2648 * Truncates the given regular file @fd to @offset and, when growing, fills the
2649 * new space according to @prealloc.
2651 * Returns: 0 on success, -errno on failure.
2653 static int coroutine_fn
2654 raw_regular_truncate(BlockDriverState
*bs
, int fd
, int64_t offset
,
2655 PreallocMode prealloc
, Error
**errp
)
2657 RawPosixAIOData acb
;
2659 acb
= (RawPosixAIOData
) {
2662 .aio_type
= QEMU_AIO_TRUNCATE
,
2663 .aio_offset
= offset
,
2665 .prealloc
= prealloc
,
2670 return raw_thread_pool_submit(handle_aiocb_truncate
, &acb
);
2673 static int coroutine_fn
raw_co_truncate(BlockDriverState
*bs
, int64_t offset
,
2674 bool exact
, PreallocMode prealloc
,
2675 BdrvRequestFlags flags
, Error
**errp
)
2677 BDRVRawState
*s
= bs
->opaque
;
2681 if (fstat(s
->fd
, &st
)) {
2683 error_setg_errno(errp
, -ret
, "Failed to fstat() the file");
2687 if (S_ISREG(st
.st_mode
)) {
2688 /* Always resizes to the exact @offset */
2689 return raw_regular_truncate(bs
, s
->fd
, offset
, prealloc
, errp
);
2692 if (prealloc
!= PREALLOC_MODE_OFF
) {
2693 error_setg(errp
, "Preallocation mode '%s' unsupported for this "
2694 "non-regular file", PreallocMode_str(prealloc
));
2698 if (S_ISCHR(st
.st_mode
) || S_ISBLK(st
.st_mode
)) {
2699 int64_t cur_length
= raw_co_getlength(bs
);
2701 if (offset
!= cur_length
&& exact
) {
2702 error_setg(errp
, "Cannot resize device files");
2704 } else if (offset
> cur_length
) {
2705 error_setg(errp
, "Cannot grow device files");
2709 error_setg(errp
, "Resizing this file is not supported");
2717 static int64_t coroutine_fn
raw_co_getlength(BlockDriverState
*bs
)
2719 BDRVRawState
*s
= bs
->opaque
;
2725 if (S_ISCHR(st
.st_mode
) || S_ISBLK(st
.st_mode
)) {
2726 struct disklabel dl
;
2728 if (ioctl(fd
, DIOCGDINFO
, &dl
))
2730 return (uint64_t)dl
.d_secsize
*
2731 dl
.d_partitions
[DISKPART(st
.st_rdev
)].p_size
;
2735 #elif defined(__NetBSD__)
2736 static int64_t coroutine_fn
raw_co_getlength(BlockDriverState
*bs
)
2738 BDRVRawState
*s
= bs
->opaque
;
2744 if (S_ISCHR(st
.st_mode
) || S_ISBLK(st
.st_mode
)) {
2745 struct dkwedge_info dkw
;
2747 if (ioctl(fd
, DIOCGWEDGEINFO
, &dkw
) != -1) {
2748 return dkw
.dkw_size
* 512;
2750 struct disklabel dl
;
2752 if (ioctl(fd
, DIOCGDINFO
, &dl
))
2754 return (uint64_t)dl
.d_secsize
*
2755 dl
.d_partitions
[DISKPART(st
.st_rdev
)].p_size
;
2760 #elif defined(__sun__)
2761 static int64_t coroutine_fn
raw_co_getlength(BlockDriverState
*bs
)
2763 BDRVRawState
*s
= bs
->opaque
;
2764 struct dk_minfo minfo
;
2774 * Use the DKIOCGMEDIAINFO ioctl to read the size.
2776 ret
= ioctl(s
->fd
, DKIOCGMEDIAINFO
, &minfo
);
2778 return minfo
.dki_lbsize
* minfo
.dki_capacity
;
2782 * There are reports that lseek on some devices fails, but
2783 * irc discussion said that contingency on contingency was overkill.
2785 size
= lseek(s
->fd
, 0, SEEK_END
);
2791 #elif defined(CONFIG_BSD)
2792 static int64_t coroutine_fn
raw_co_getlength(BlockDriverState
*bs
)
2794 BDRVRawState
*s
= bs
->opaque
;
2798 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
2807 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
2810 if (!fstat(fd
, &sb
) && (S_IFCHR
& sb
.st_mode
)) {
2812 #ifdef DIOCGMEDIASIZE
2813 if (ioctl(fd
, DIOCGMEDIASIZE
, (off_t
*)&size
)) {
2820 if (ioctl(fd
, DIOCGPART
, &pi
) == 0) {
2821 size
= pi
.media_size
;
2825 #if defined(DKIOCGETBLOCKCOUNT) && defined(DKIOCGETBLOCKSIZE)
2827 uint64_t sectors
= 0;
2828 uint32_t sector_size
= 0;
2830 if (ioctl(fd
, DKIOCGETBLOCKCOUNT
, §ors
) == 0
2831 && ioctl(fd
, DKIOCGETBLOCKSIZE
, §or_size
) == 0) {
2832 size
= sectors
* sector_size
;
2837 size
= lseek(fd
, 0LL, SEEK_END
);
2842 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
2845 /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */
2846 if (size
== 2048LL * (unsigned)-1)
2848 /* XXX no disc? maybe we need to reopen... */
2849 if (size
<= 0 && !reopened
&& cdrom_reopen(bs
) >= 0) {
2856 size
= lseek(fd
, 0, SEEK_END
);
2864 static int64_t coroutine_fn
raw_co_getlength(BlockDriverState
*bs
)
2866 BDRVRawState
*s
= bs
->opaque
;
2875 size
= lseek(s
->fd
, 0, SEEK_END
);
2883 static int64_t coroutine_fn
raw_co_get_allocated_file_size(BlockDriverState
*bs
)
2886 BDRVRawState
*s
= bs
->opaque
;
2888 if (fstat(s
->fd
, &st
) < 0) {
2891 return (int64_t)st
.st_blocks
* 512;
2894 static int coroutine_fn
2895 raw_co_create(BlockdevCreateOptions
*options
, Error
**errp
)
2897 BlockdevCreateOptionsFile
*file_opts
;
2898 Error
*local_err
= NULL
;
2900 uint64_t perm
, shared
;
2903 /* Validate options and set default values */
2904 assert(options
->driver
== BLOCKDEV_DRIVER_FILE
);
2905 file_opts
= &options
->u
.file
;
2907 if (!file_opts
->has_nocow
) {
2908 file_opts
->nocow
= false;
2910 if (!file_opts
->has_preallocation
) {
2911 file_opts
->preallocation
= PREALLOC_MODE_OFF
;
2913 if (!file_opts
->has_extent_size_hint
) {
2914 file_opts
->extent_size_hint
= 1 * MiB
;
2916 if (file_opts
->extent_size_hint
> UINT32_MAX
) {
2918 error_setg(errp
, "Extent size hint is too large");
2923 fd
= qemu_create(file_opts
->filename
, O_RDWR
| O_BINARY
, 0644, errp
);
2929 /* Take permissions: We want to discard everything, so we need
2930 * BLK_PERM_WRITE; and truncation to the desired size requires
2932 * On the other hand, we cannot share the RESIZE permission
2933 * because we promise that after this function, the file has the
2934 * size given in the options. If someone else were to resize it
2935 * concurrently, we could not guarantee that.
2936 * Note that after this function, we can no longer guarantee that
2937 * the file is not touched by a third party, so it may be resized
2939 perm
= BLK_PERM_WRITE
| BLK_PERM_RESIZE
;
2940 shared
= BLK_PERM_ALL
& ~BLK_PERM_RESIZE
;
2942 /* Step one: Take locks */
2943 result
= raw_apply_lock_bytes(NULL
, fd
, perm
, ~shared
, false, errp
);
2948 /* Step two: Check that nobody else has taken conflicting locks */
2949 result
= raw_check_lock_bytes(fd
, perm
, shared
, errp
);
2951 error_append_hint(errp
,
2952 "Is another process using the image [%s]?\n",
2953 file_opts
->filename
);
2957 /* Clear the file by truncating it to 0 */
2958 result
= raw_regular_truncate(NULL
, fd
, 0, PREALLOC_MODE_OFF
, errp
);
2963 if (file_opts
->nocow
) {
2965 /* Set NOCOW flag to solve performance issue on fs like btrfs.
2966 * This is an optimisation. The FS_IOC_SETFLAGS ioctl return value
2967 * will be ignored since any failure of this operation should not
2968 * block the left work.
2971 if (ioctl(fd
, FS_IOC_GETFLAGS
, &attr
) == 0) {
2972 attr
|= FS_NOCOW_FL
;
2973 ioctl(fd
, FS_IOC_SETFLAGS
, &attr
);
2977 #ifdef FS_IOC_FSSETXATTR
2979 * Try to set the extent size hint. Failure is not fatal, and a warning is
2980 * only printed if the option was explicitly specified.
2983 struct fsxattr attr
;
2984 result
= ioctl(fd
, FS_IOC_FSGETXATTR
, &attr
);
2986 attr
.fsx_xflags
|= FS_XFLAG_EXTSIZE
;
2987 attr
.fsx_extsize
= file_opts
->extent_size_hint
;
2988 result
= ioctl(fd
, FS_IOC_FSSETXATTR
, &attr
);
2990 if (result
< 0 && file_opts
->has_extent_size_hint
&&
2991 file_opts
->extent_size_hint
)
2993 warn_report("Failed to set extent size hint: %s",
2999 /* Resize and potentially preallocate the file to the desired
3001 result
= raw_regular_truncate(NULL
, fd
, file_opts
->size
,
3002 file_opts
->preallocation
, errp
);
3008 raw_apply_lock_bytes(NULL
, fd
, 0, 0, true, &local_err
);
3010 /* The above call should not fail, and if it does, that does
3011 * not mean the whole creation operation has failed. So
3012 * report it the user for their convenience, but do not report
3013 * it to the caller. */
3014 warn_report_err(local_err
);
3018 if (qemu_close(fd
) != 0 && result
== 0) {
3020 error_setg_errno(errp
, -result
, "Could not close the new file");
3026 static int coroutine_fn GRAPH_RDLOCK
3027 raw_co_create_opts(BlockDriver
*drv
, const char *filename
,
3028 QemuOpts
*opts
, Error
**errp
)
3030 BlockdevCreateOptions options
;
3031 int64_t total_size
= 0;
3032 int64_t extent_size_hint
= 0;
3033 bool has_extent_size_hint
= false;
3035 PreallocMode prealloc
;
3037 Error
*local_err
= NULL
;
3039 /* Skip file: protocol prefix */
3040 strstart(filename
, "file:", &filename
);
3042 /* Read out options */
3043 total_size
= ROUND_UP(qemu_opt_get_size_del(opts
, BLOCK_OPT_SIZE
, 0),
3045 if (qemu_opt_get(opts
, BLOCK_OPT_EXTENT_SIZE_HINT
)) {
3046 has_extent_size_hint
= true;
3048 qemu_opt_get_size_del(opts
, BLOCK_OPT_EXTENT_SIZE_HINT
, -1);
3050 nocow
= qemu_opt_get_bool(opts
, BLOCK_OPT_NOCOW
, false);
3051 buf
= qemu_opt_get_del(opts
, BLOCK_OPT_PREALLOC
);
3052 prealloc
= qapi_enum_parse(&PreallocMode_lookup
, buf
,
3053 PREALLOC_MODE_OFF
, &local_err
);
3056 error_propagate(errp
, local_err
);
3060 options
= (BlockdevCreateOptions
) {
3061 .driver
= BLOCKDEV_DRIVER_FILE
,
3063 .filename
= (char *) filename
,
3065 .has_preallocation
= true,
3066 .preallocation
= prealloc
,
3069 .has_extent_size_hint
= has_extent_size_hint
,
3070 .extent_size_hint
= extent_size_hint
,
3073 return raw_co_create(&options
, errp
);
3076 static int coroutine_fn
raw_co_delete_file(BlockDriverState
*bs
,
3082 if (!(stat(bs
->filename
, &st
) == 0) || !S_ISREG(st
.st_mode
)) {
3083 error_setg_errno(errp
, ENOENT
, "%s is not a regular file",
3088 ret
= unlink(bs
->filename
);
3091 error_setg_errno(errp
, -ret
, "Error when deleting file %s",
3099 * Find allocation range in @bs around offset @start.
3100 * May change underlying file descriptor's file offset.
3101 * If @start is not in a hole, store @start in @data, and the
3102 * beginning of the next hole in @hole, and return 0.
3103 * If @start is in a non-trailing hole, store @start in @hole and the
3104 * beginning of the next non-hole in @data, and return 0.
3105 * If @start is in a trailing hole or beyond EOF, return -ENXIO.
3106 * If we can't find out, return a negative errno other than -ENXIO.
3108 static int find_allocation(BlockDriverState
*bs
, off_t start
,
3109 off_t
*data
, off_t
*hole
)
3111 #if defined SEEK_HOLE && defined SEEK_DATA
3112 BDRVRawState
*s
= bs
->opaque
;
3117 * D1. offs == start: start is in data
3118 * D2. offs > start: start is in a hole, next data at offs
3119 * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
3120 * or start is beyond EOF
3121 * If the latter happens, the file has been truncated behind
3122 * our back since we opened it. All bets are off then.
3123 * Treating like a trailing hole is simplest.
3124 * D4. offs < 0, errno != ENXIO: we learned nothing
3126 offs
= lseek(s
->fd
, start
, SEEK_DATA
);
3128 return -errno
; /* D3 or D4 */
3132 /* This is not a valid return by lseek(). We are safe to just return
3133 * -EIO in this case, and we'll treat it like D4. */
3138 /* D2: in hole, next data at offs */
3144 /* D1: in data, end not yet known */
3148 * H1. offs == start: start is in a hole
3149 * If this happens here, a hole has been dug behind our back
3150 * since the previous lseek().
3151 * H2. offs > start: either start is in data, next hole at offs,
3152 * or start is in trailing hole, EOF at offs
3153 * Linux treats trailing holes like any other hole: offs ==
3154 * start. Solaris seeks to EOF instead: offs > start (blech).
3155 * If that happens here, a hole has been dug behind our back
3156 * since the previous lseek().
3157 * H3. offs < 0, errno = ENXIO: start is beyond EOF
3158 * If this happens, the file has been truncated behind our
3159 * back since we opened it. Treat it like a trailing hole.
3160 * H4. offs < 0, errno != ENXIO: we learned nothing
3161 * Pretend we know nothing at all, i.e. "forget" about D1.
3163 offs
= lseek(s
->fd
, start
, SEEK_HOLE
);
3165 return -errno
; /* D1 and (H3 or H4) */
3169 /* This is not a valid return by lseek(). We are safe to just return
3170 * -EIO in this case, and we'll treat it like H4. */
3176 * D1 and H2: either in data, next hole at offs, or it was in
3177 * data but is now in a trailing hole. In the latter case,
3178 * all bets are off. Treating it as if it there was data all
3179 * the way to EOF is safe, so simply do that.
3194 * Returns the allocation status of the specified offset.
3196 * The block layer guarantees 'offset' and 'bytes' are within bounds.
3198 * 'pnum' is set to the number of bytes (including and immediately following
3199 * the specified offset) that are known to be in the same
3200 * allocated/unallocated state.
3202 * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
3205 static int coroutine_fn
raw_co_block_status(BlockDriverState
*bs
,
3208 int64_t bytes
, int64_t *pnum
,
3210 BlockDriverState
**file
)
3212 off_t data
= 0, hole
= 0;
3215 assert(QEMU_IS_ALIGNED(offset
| bytes
, bs
->bl
.request_alignment
));
3226 return BDRV_BLOCK_DATA
| BDRV_BLOCK_OFFSET_VALID
;
3229 ret
= find_allocation(bs
, offset
, &data
, &hole
);
3230 if (ret
== -ENXIO
) {
3233 ret
= BDRV_BLOCK_ZERO
;
3234 } else if (ret
< 0) {
3235 /* No info available, so pretend there are no holes */
3237 ret
= BDRV_BLOCK_DATA
;
3238 } else if (data
== offset
) {
3239 /* On a data extent, compute bytes to the end of the extent,
3240 * possibly including a partial sector at EOF. */
3241 *pnum
= hole
- offset
;
3244 * We are not allowed to return partial sectors, though, so
3245 * round up if necessary.
3247 if (!QEMU_IS_ALIGNED(*pnum
, bs
->bl
.request_alignment
)) {
3248 int64_t file_length
= raw_co_getlength(bs
);
3249 if (file_length
> 0) {
3250 /* Ignore errors, this is just a safeguard */
3251 assert(hole
== file_length
);
3253 *pnum
= ROUND_UP(*pnum
, bs
->bl
.request_alignment
);
3256 ret
= BDRV_BLOCK_DATA
;
3258 /* On a hole, compute bytes to the beginning of the next extent. */
3259 assert(hole
== offset
);
3260 *pnum
= data
- offset
;
3261 ret
= BDRV_BLOCK_ZERO
;
3265 return ret
| BDRV_BLOCK_OFFSET_VALID
;
3268 #if defined(__linux__)
3269 /* Verify that the file is not in the page cache */
3270 static void coroutine_fn
check_cache_dropped(BlockDriverState
*bs
, Error
**errp
)
3272 const size_t window_size
= 128 * 1024 * 1024;
3273 BDRVRawState
*s
= bs
->opaque
;
3274 void *window
= NULL
;
3281 /* mincore(2) page status information requires 1 byte per page */
3282 page_size
= sysconf(_SC_PAGESIZE
);
3283 vec
= g_malloc(DIV_ROUND_UP(window_size
, page_size
));
3285 end
= raw_co_getlength(bs
);
3287 for (offset
= 0; offset
< end
; offset
+= window_size
) {
3294 /* Unmap previous window if size has changed */
3295 new_length
= MIN(end
- offset
, window_size
);
3296 if (new_length
!= length
) {
3297 munmap(window
, length
);
3302 new_window
= mmap(window
, new_length
, PROT_NONE
, MAP_PRIVATE
,
3304 if (new_window
== MAP_FAILED
) {
3305 error_setg_errno(errp
, errno
, "mmap failed");
3309 window
= new_window
;
3310 length
= new_length
;
3312 ret
= mincore(window
, length
, vec
);
3314 error_setg_errno(errp
, errno
, "mincore failed");
3318 vec_end
= DIV_ROUND_UP(length
, page_size
);
3319 for (i
= 0; i
< vec_end
; i
++) {
3325 error_setg(errp
, "page cache still in use!");
3331 munmap(window
, length
);
3336 #endif /* __linux__ */
3338 static void coroutine_fn GRAPH_RDLOCK
3339 raw_co_invalidate_cache(BlockDriverState
*bs
, Error
**errp
)
3341 BDRVRawState
*s
= bs
->opaque
;
3346 error_setg_errno(errp
, -ret
, "The file descriptor is not open");
3350 if (!s
->drop_cache
) {
3354 if (s
->open_flags
& O_DIRECT
) {
3355 return; /* No host kernel page cache */
3358 #if defined(__linux__)
3359 /* This sets the scene for the next syscall... */
3360 ret
= bdrv_co_flush(bs
);
3362 error_setg_errno(errp
, -ret
, "flush failed");
3366 /* Linux does not invalidate pages that are dirty, locked, or mmapped by a
3367 * process. These limitations are okay because we just fsynced the file,
3368 * we don't use mmap, and the file should not be in use by other processes.
3370 ret
= posix_fadvise(s
->fd
, 0, 0, POSIX_FADV_DONTNEED
);
3371 if (ret
!= 0) { /* the return value is a positive errno */
3372 error_setg_errno(errp
, ret
, "fadvise failed");
3376 if (s
->check_cache_dropped
) {
3377 check_cache_dropped(bs
, errp
);
3379 #else /* __linux__ */
3380 /* Do nothing. Live migration to a remote host with cache.direct=off is
3381 * unsupported on other host operating systems. Cache consistency issues
3382 * may occur but no error is reported here, partly because that's the
3383 * historical behavior and partly because it's hard to differentiate valid
3384 * configurations that should not cause errors.
3386 #endif /* !__linux__ */
3389 static void raw_account_discard(BDRVRawState
*s
, uint64_t nbytes
, int ret
)
3392 s
->stats
.discard_nb_failed
++;
3394 s
->stats
.discard_nb_ok
++;
3395 s
->stats
.discard_bytes_ok
+= nbytes
;
3400 * zone report - Get a zone block device's information in the form
3401 * of an array of zone descriptors.
3402 * zones is an array of zone descriptors to hold zone information on reply;
3403 * offset can be any byte within the entire size of the device;
3404 * nr_zones is the maxium number of sectors the command should operate on.
3406 #if defined(CONFIG_BLKZONED)
3407 static int coroutine_fn
raw_co_zone_report(BlockDriverState
*bs
, int64_t offset
,
3408 unsigned int *nr_zones
,
3409 BlockZoneDescriptor
*zones
) {
3410 BDRVRawState
*s
= bs
->opaque
;
3411 RawPosixAIOData acb
= (RawPosixAIOData
) {
3413 .aio_fildes
= s
->fd
,
3414 .aio_type
= QEMU_AIO_ZONE_REPORT
,
3415 .aio_offset
= offset
,
3417 .nr_zones
= nr_zones
,
3422 trace_zbd_zone_report(bs
, *nr_zones
, offset
>> BDRV_SECTOR_BITS
);
3423 return raw_thread_pool_submit(handle_aiocb_zone_report
, &acb
);
3428 * zone management operations - Execute an operation on a zone
3430 #if defined(CONFIG_BLKZONED)
3431 static int coroutine_fn
raw_co_zone_mgmt(BlockDriverState
*bs
, BlockZoneOp op
,
3432 int64_t offset
, int64_t len
) {
3433 BDRVRawState
*s
= bs
->opaque
;
3434 RawPosixAIOData acb
;
3435 int64_t zone_size
, zone_size_mask
;
3436 const char *op_name
;
3439 BlockZoneWps
*wps
= bs
->wps
;
3440 int64_t capacity
= bs
->total_sectors
<< BDRV_SECTOR_BITS
;
3442 zone_size
= bs
->bl
.zone_size
;
3443 zone_size_mask
= zone_size
- 1;
3444 if (offset
& zone_size_mask
) {
3445 error_report("sector offset %" PRId64
" is not aligned to zone size "
3446 "%" PRId64
"", offset
/ 512, zone_size
/ 512);
3450 if (((offset
+ len
) < capacity
&& len
& zone_size_mask
) ||
3451 offset
+ len
> capacity
) {
3452 error_report("number of sectors %" PRId64
" is not aligned to zone size"
3453 " %" PRId64
"", len
/ 512, zone_size
/ 512);
3457 uint32_t i
= offset
/ bs
->bl
.zone_size
;
3458 uint32_t nrz
= len
/ bs
->bl
.zone_size
;
3459 uint64_t *wp
= &wps
->wp
[i
];
3460 if (BDRV_ZT_IS_CONV(*wp
) && len
!= capacity
) {
3461 error_report("zone mgmt operations are not allowed for conventional zones");
3467 op_name
= "BLKOPENZONE";
3471 op_name
= "BLKCLOSEZONE";
3475 op_name
= "BLKFINISHZONE";
3479 op_name
= "BLKRESETZONE";
3483 error_report("Unsupported zone op: 0x%x", op
);
3487 acb
= (RawPosixAIOData
) {
3489 .aio_fildes
= s
->fd
,
3490 .aio_type
= QEMU_AIO_ZONE_MGMT
,
3491 .aio_offset
= offset
,
3498 trace_zbd_zone_mgmt(bs
, op_name
, offset
>> BDRV_SECTOR_BITS
,
3499 len
>> BDRV_SECTOR_BITS
);
3500 ret
= raw_thread_pool_submit(handle_aiocb_zone_mgmt
, &acb
);
3502 update_zones_wp(bs
, s
->fd
, offset
, i
);
3503 error_report("ioctl %s failed %d", op_name
, ret
);
3507 if (zo
== BLKRESETZONE
&& len
== capacity
) {
3508 ret
= get_zones_wp(bs
, s
->fd
, 0, bs
->bl
.nr_zones
, 1);
3510 error_report("reporting single wp failed");
3513 } else if (zo
== BLKRESETZONE
) {
3514 for (unsigned int j
= 0; j
< nrz
; ++j
) {
3515 wp
[j
] = offset
+ j
* zone_size
;
3517 } else if (zo
== BLKFINISHZONE
) {
3518 for (unsigned int j
= 0; j
< nrz
; ++j
) {
3519 /* The zoned device allows the last zone smaller that the
3521 wp
[j
] = MIN(offset
+ (j
+ 1) * zone_size
, offset
+ len
);
3529 #if defined(CONFIG_BLKZONED)
3530 static int coroutine_fn
raw_co_zone_append(BlockDriverState
*bs
,
3533 BdrvRequestFlags flags
) {
3535 int64_t zone_size_mask
= bs
->bl
.zone_size
- 1;
3536 int64_t iov_len
= 0;
3538 BDRVRawState
*s
= bs
->opaque
;
3541 if (*offset
& zone_size_mask
) {
3542 error_report("sector offset %" PRId64
" is not aligned to zone size "
3543 "%" PRId32
"", *offset
/ 512, bs
->bl
.zone_size
/ 512);
3547 int64_t wg
= bs
->bl
.write_granularity
;
3548 int64_t wg_mask
= wg
- 1;
3549 for (int i
= 0; i
< qiov
->niov
; i
++) {
3550 iov_len
= qiov
->iov
[i
].iov_len
;
3551 if (iov_len
& wg_mask
) {
3552 error_report("len of IOVector[%d] %" PRId64
" is not aligned to "
3553 "block size %" PRId64
"", i
, iov_len
, wg
);
3559 trace_zbd_zone_append(bs
, *offset
>> BDRV_SECTOR_BITS
);
3560 return raw_co_prw(bs
, *offset
, len
, qiov
, QEMU_AIO_ZONE_APPEND
);
3564 static coroutine_fn
int
3565 raw_do_pdiscard(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
,
3568 BDRVRawState
*s
= bs
->opaque
;
3569 RawPosixAIOData acb
;
3572 acb
= (RawPosixAIOData
) {
3574 .aio_fildes
= s
->fd
,
3575 .aio_type
= QEMU_AIO_DISCARD
,
3576 .aio_offset
= offset
,
3577 .aio_nbytes
= bytes
,
3581 acb
.aio_type
|= QEMU_AIO_BLKDEV
;
3584 ret
= raw_thread_pool_submit(handle_aiocb_discard
, &acb
);
3585 raw_account_discard(s
, bytes
, ret
);
3589 static coroutine_fn
int
3590 raw_co_pdiscard(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
)
3592 return raw_do_pdiscard(bs
, offset
, bytes
, false);
3595 static int coroutine_fn
3596 raw_do_pwrite_zeroes(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
,
3597 BdrvRequestFlags flags
, bool blkdev
)
3599 BDRVRawState
*s
= bs
->opaque
;
3600 RawPosixAIOData acb
;
3601 ThreadPoolFunc
*handler
;
3603 #ifdef CONFIG_FALLOCATE
3604 if (offset
+ bytes
> bs
->total_sectors
* BDRV_SECTOR_SIZE
) {
3605 BdrvTrackedRequest
*req
;
3608 * This is a workaround for a bug in the Linux XFS driver,
3609 * where writes submitted through the AIO interface will be
3610 * discarded if they happen beyond a concurrently running
3611 * fallocate() that increases the file length (i.e., both the
3612 * write and the fallocate() happen beyond the EOF).
3614 * To work around it, we extend the tracked request for this
3615 * zero write until INT64_MAX (effectively infinity), and mark
3616 * it as serializing.
3618 * We have to enable this workaround for all filesystems and
3619 * AIO modes (not just XFS with aio=native), because for
3620 * remote filesystems we do not know the host configuration.
3623 req
= bdrv_co_get_self_request(bs
);
3625 assert(req
->type
== BDRV_TRACKED_WRITE
);
3626 assert(req
->offset
<= offset
);
3627 assert(req
->offset
+ req
->bytes
>= offset
+ bytes
);
3629 req
->bytes
= BDRV_MAX_LENGTH
- req
->offset
;
3631 bdrv_check_request(req
->offset
, req
->bytes
, &error_abort
);
3633 bdrv_make_request_serialising(req
, bs
->bl
.request_alignment
);
3637 acb
= (RawPosixAIOData
) {
3639 .aio_fildes
= s
->fd
,
3640 .aio_type
= QEMU_AIO_WRITE_ZEROES
,
3641 .aio_offset
= offset
,
3642 .aio_nbytes
= bytes
,
3646 acb
.aio_type
|= QEMU_AIO_BLKDEV
;
3648 if (flags
& BDRV_REQ_NO_FALLBACK
) {
3649 acb
.aio_type
|= QEMU_AIO_NO_FALLBACK
;
3652 if (flags
& BDRV_REQ_MAY_UNMAP
) {
3653 acb
.aio_type
|= QEMU_AIO_DISCARD
;
3654 handler
= handle_aiocb_write_zeroes_unmap
;
3656 handler
= handle_aiocb_write_zeroes
;
3659 return raw_thread_pool_submit(handler
, &acb
);
3662 static int coroutine_fn
raw_co_pwrite_zeroes(
3663 BlockDriverState
*bs
, int64_t offset
,
3664 int64_t bytes
, BdrvRequestFlags flags
)
3666 return raw_do_pwrite_zeroes(bs
, offset
, bytes
, flags
, false);
3669 static int coroutine_fn
3670 raw_co_get_info(BlockDriverState
*bs
, BlockDriverInfo
*bdi
)
3675 static ImageInfoSpecific
*raw_get_specific_info(BlockDriverState
*bs
,
3678 ImageInfoSpecificFile
*file_info
= g_new0(ImageInfoSpecificFile
, 1);
3679 ImageInfoSpecific
*spec_info
= g_new(ImageInfoSpecific
, 1);
3681 *spec_info
= (ImageInfoSpecific
){
3682 .type
= IMAGE_INFO_SPECIFIC_KIND_FILE
,
3683 .u
.file
.data
= file_info
,
3686 #ifdef FS_IOC_FSGETXATTR
3688 BDRVRawState
*s
= bs
->opaque
;
3689 struct fsxattr attr
;
3692 ret
= ioctl(s
->fd
, FS_IOC_FSGETXATTR
, &attr
);
3693 if (!ret
&& attr
.fsx_extsize
!= 0) {
3694 file_info
->has_extent_size_hint
= true;
3695 file_info
->extent_size_hint
= attr
.fsx_extsize
;
3703 static BlockStatsSpecificFile
get_blockstats_specific_file(BlockDriverState
*bs
)
3705 BDRVRawState
*s
= bs
->opaque
;
3706 return (BlockStatsSpecificFile
) {
3707 .discard_nb_ok
= s
->stats
.discard_nb_ok
,
3708 .discard_nb_failed
= s
->stats
.discard_nb_failed
,
3709 .discard_bytes_ok
= s
->stats
.discard_bytes_ok
,
3713 static BlockStatsSpecific
*raw_get_specific_stats(BlockDriverState
*bs
)
3715 BlockStatsSpecific
*stats
= g_new(BlockStatsSpecific
, 1);
3717 stats
->driver
= BLOCKDEV_DRIVER_FILE
;
3718 stats
->u
.file
= get_blockstats_specific_file(bs
);
3723 #if defined(HAVE_HOST_BLOCK_DEVICE)
3724 static BlockStatsSpecific
*hdev_get_specific_stats(BlockDriverState
*bs
)
3726 BlockStatsSpecific
*stats
= g_new(BlockStatsSpecific
, 1);
3728 stats
->driver
= BLOCKDEV_DRIVER_HOST_DEVICE
;
3729 stats
->u
.host_device
= get_blockstats_specific_file(bs
);
3733 #endif /* HAVE_HOST_BLOCK_DEVICE */
3735 static QemuOptsList raw_create_opts
= {
3736 .name
= "raw-create-opts",
3737 .head
= QTAILQ_HEAD_INITIALIZER(raw_create_opts
.head
),
3740 .name
= BLOCK_OPT_SIZE
,
3741 .type
= QEMU_OPT_SIZE
,
3742 .help
= "Virtual disk size"
3745 .name
= BLOCK_OPT_NOCOW
,
3746 .type
= QEMU_OPT_BOOL
,
3747 .help
= "Turn off copy-on-write (valid only on btrfs)"
3750 .name
= BLOCK_OPT_PREALLOC
,
3751 .type
= QEMU_OPT_STRING
,
3752 .help
= "Preallocation mode (allowed values: off"
3753 #ifdef CONFIG_POSIX_FALLOCATE
3759 .name
= BLOCK_OPT_EXTENT_SIZE_HINT
,
3760 .type
= QEMU_OPT_SIZE
,
3761 .help
= "Extent size hint for the image file, 0 to disable"
3763 { /* end of list */ }
3767 static int raw_check_perm(BlockDriverState
*bs
, uint64_t perm
, uint64_t shared
,
3770 BDRVRawState
*s
= bs
->opaque
;
3771 int input_flags
= s
->reopen_state
? s
->reopen_state
->flags
: bs
->open_flags
;
3775 /* We may need a new fd if auto-read-only switches the mode */
3776 ret
= raw_reconfigure_getfd(bs
, input_flags
, &open_flags
, perm
,
3780 } else if (ret
!= s
->fd
) {
3781 Error
*local_err
= NULL
;
3784 * Fail already check_perm() if we can't get a working O_DIRECT
3785 * alignment with the new fd.
3787 raw_probe_alignment(bs
, ret
, &local_err
);
3789 error_propagate(errp
, local_err
);
3793 s
->perm_change_fd
= ret
;
3794 s
->perm_change_flags
= open_flags
;
3797 /* Prepare permissions on old fd to avoid conflicts between old and new,
3798 * but keep everything locked that new will need. */
3799 ret
= raw_handle_perm_lock(bs
, RAW_PL_PREPARE
, perm
, shared
, errp
);
3804 /* Copy locks to the new fd */
3805 if (s
->perm_change_fd
&& s
->use_lock
) {
3806 ret
= raw_apply_lock_bytes(NULL
, s
->perm_change_fd
, perm
, ~shared
,
3809 raw_handle_perm_lock(bs
, RAW_PL_ABORT
, 0, 0, NULL
);
3816 if (s
->perm_change_fd
) {
3817 qemu_close(s
->perm_change_fd
);
3819 s
->perm_change_fd
= 0;
3823 static void raw_set_perm(BlockDriverState
*bs
, uint64_t perm
, uint64_t shared
)
3825 BDRVRawState
*s
= bs
->opaque
;
3827 /* For reopen, we have already switched to the new fd (.bdrv_set_perm is
3828 * called after .bdrv_reopen_commit) */
3829 if (s
->perm_change_fd
&& s
->fd
!= s
->perm_change_fd
) {
3831 s
->fd
= s
->perm_change_fd
;
3832 s
->open_flags
= s
->perm_change_flags
;
3834 s
->perm_change_fd
= 0;
3836 raw_handle_perm_lock(bs
, RAW_PL_COMMIT
, perm
, shared
, NULL
);
3838 s
->shared_perm
= shared
;
3841 static void raw_abort_perm_update(BlockDriverState
*bs
)
3843 BDRVRawState
*s
= bs
->opaque
;
3845 /* For reopen, .bdrv_reopen_abort is called afterwards and will close
3846 * the file descriptor. */
3847 if (s
->perm_change_fd
) {
3848 qemu_close(s
->perm_change_fd
);
3850 s
->perm_change_fd
= 0;
3852 raw_handle_perm_lock(bs
, RAW_PL_ABORT
, 0, 0, NULL
);
3855 static int coroutine_fn GRAPH_RDLOCK
raw_co_copy_range_from(
3856 BlockDriverState
*bs
, BdrvChild
*src
, int64_t src_offset
,
3857 BdrvChild
*dst
, int64_t dst_offset
, int64_t bytes
,
3858 BdrvRequestFlags read_flags
, BdrvRequestFlags write_flags
)
3860 return bdrv_co_copy_range_to(src
, src_offset
, dst
, dst_offset
, bytes
,
3861 read_flags
, write_flags
);
3864 static int coroutine_fn GRAPH_RDLOCK
3865 raw_co_copy_range_to(BlockDriverState
*bs
,
3866 BdrvChild
*src
, int64_t src_offset
,
3867 BdrvChild
*dst
, int64_t dst_offset
,
3868 int64_t bytes
, BdrvRequestFlags read_flags
,
3869 BdrvRequestFlags write_flags
)
3871 RawPosixAIOData acb
;
3872 BDRVRawState
*s
= bs
->opaque
;
3873 BDRVRawState
*src_s
;
3875 assert(dst
->bs
== bs
);
3876 if (src
->bs
->drv
->bdrv_co_copy_range_to
!= raw_co_copy_range_to
) {
3880 src_s
= src
->bs
->opaque
;
3881 if (fd_open(src
->bs
) < 0 || fd_open(dst
->bs
) < 0) {
3885 acb
= (RawPosixAIOData
) {
3887 .aio_type
= QEMU_AIO_COPY_RANGE
,
3888 .aio_fildes
= src_s
->fd
,
3889 .aio_offset
= src_offset
,
3890 .aio_nbytes
= bytes
,
3893 .aio_offset2
= dst_offset
,
3897 return raw_thread_pool_submit(handle_aiocb_copy_range
, &acb
);
3900 BlockDriver bdrv_file
= {
3901 .format_name
= "file",
3902 .protocol_name
= "file",
3903 .instance_size
= sizeof(BDRVRawState
),
3904 .bdrv_needs_filename
= true,
3905 .bdrv_probe
= NULL
, /* no probe for protocols */
3906 .bdrv_parse_filename
= raw_parse_filename
,
3907 .bdrv_file_open
= raw_open
,
3908 .bdrv_reopen_prepare
= raw_reopen_prepare
,
3909 .bdrv_reopen_commit
= raw_reopen_commit
,
3910 .bdrv_reopen_abort
= raw_reopen_abort
,
3911 .bdrv_close
= raw_close
,
3912 .bdrv_co_create
= raw_co_create
,
3913 .bdrv_co_create_opts
= raw_co_create_opts
,
3914 .bdrv_has_zero_init
= bdrv_has_zero_init_1
,
3915 .bdrv_co_block_status
= raw_co_block_status
,
3916 .bdrv_co_invalidate_cache
= raw_co_invalidate_cache
,
3917 .bdrv_co_pwrite_zeroes
= raw_co_pwrite_zeroes
,
3918 .bdrv_co_delete_file
= raw_co_delete_file
,
3920 .bdrv_co_preadv
= raw_co_preadv
,
3921 .bdrv_co_pwritev
= raw_co_pwritev
,
3922 .bdrv_co_flush_to_disk
= raw_co_flush_to_disk
,
3923 .bdrv_co_pdiscard
= raw_co_pdiscard
,
3924 .bdrv_co_copy_range_from
= raw_co_copy_range_from
,
3925 .bdrv_co_copy_range_to
= raw_co_copy_range_to
,
3926 .bdrv_refresh_limits
= raw_refresh_limits
,
3927 .bdrv_co_io_plug
= raw_co_io_plug
,
3928 .bdrv_co_io_unplug
= raw_co_io_unplug
,
3929 .bdrv_attach_aio_context
= raw_aio_attach_aio_context
,
3931 .bdrv_co_truncate
= raw_co_truncate
,
3932 .bdrv_co_getlength
= raw_co_getlength
,
3933 .bdrv_co_get_info
= raw_co_get_info
,
3934 .bdrv_get_specific_info
= raw_get_specific_info
,
3935 .bdrv_co_get_allocated_file_size
= raw_co_get_allocated_file_size
,
3936 .bdrv_get_specific_stats
= raw_get_specific_stats
,
3937 .bdrv_check_perm
= raw_check_perm
,
3938 .bdrv_set_perm
= raw_set_perm
,
3939 .bdrv_abort_perm_update
= raw_abort_perm_update
,
3940 .create_opts
= &raw_create_opts
,
3941 .mutable_opts
= mutable_opts
,
3944 /***********************************************/
3947 #if defined(HAVE_HOST_BLOCK_DEVICE)
3949 #if defined(__APPLE__) && defined(__MACH__)
3950 static kern_return_t
GetBSDPath(io_iterator_t mediaIterator
, char *bsdPath
,
3951 CFIndex maxPathSize
, int flags
);
3953 #if !defined(MAC_OS_VERSION_12_0) \
3954 || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0)
3955 #define IOMainPort IOMasterPort
3958 static char *FindEjectableOpticalMedia(io_iterator_t
*mediaIterator
)
3960 kern_return_t kernResult
= KERN_FAILURE
;
3961 mach_port_t mainPort
;
3962 CFMutableDictionaryRef classesToMatch
;
3963 const char *matching_array
[] = {kIODVDMediaClass
, kIOCDMediaClass
};
3964 char *mediaType
= NULL
;
3966 kernResult
= IOMainPort(MACH_PORT_NULL
, &mainPort
);
3967 if ( KERN_SUCCESS
!= kernResult
) {
3968 printf("IOMainPort returned %d\n", kernResult
);
3972 for (index
= 0; index
< ARRAY_SIZE(matching_array
); index
++) {
3973 classesToMatch
= IOServiceMatching(matching_array
[index
]);
3974 if (classesToMatch
== NULL
) {
3975 error_report("IOServiceMatching returned NULL for %s",
3976 matching_array
[index
]);
3979 CFDictionarySetValue(classesToMatch
, CFSTR(kIOMediaEjectableKey
),
3981 kernResult
= IOServiceGetMatchingServices(mainPort
, classesToMatch
,
3983 if (kernResult
!= KERN_SUCCESS
) {
3984 error_report("Note: IOServiceGetMatchingServices returned %d",
3989 /* If a match was found, leave the loop */
3990 if (*mediaIterator
!= 0) {
3991 trace_file_FindEjectableOpticalMedia(matching_array
[index
]);
3992 mediaType
= g_strdup(matching_array
[index
]);
3999 kern_return_t
GetBSDPath(io_iterator_t mediaIterator
, char *bsdPath
,
4000 CFIndex maxPathSize
, int flags
)
4002 io_object_t nextMedia
;
4003 kern_return_t kernResult
= KERN_FAILURE
;
4005 nextMedia
= IOIteratorNext( mediaIterator
);
4008 CFTypeRef bsdPathAsCFString
;
4009 bsdPathAsCFString
= IORegistryEntryCreateCFProperty( nextMedia
, CFSTR( kIOBSDNameKey
), kCFAllocatorDefault
, 0 );
4010 if ( bsdPathAsCFString
) {
4011 size_t devPathLength
;
4012 strcpy( bsdPath
, _PATH_DEV
);
4013 if (flags
& BDRV_O_NOCACHE
) {
4014 strcat(bsdPath
, "r");
4016 devPathLength
= strlen( bsdPath
);
4017 if ( CFStringGetCString( bsdPathAsCFString
, bsdPath
+ devPathLength
, maxPathSize
- devPathLength
, kCFStringEncodingASCII
) ) {
4018 kernResult
= KERN_SUCCESS
;
4020 CFRelease( bsdPathAsCFString
);
4022 IOObjectRelease( nextMedia
);
4028 /* Sets up a real cdrom for use in QEMU */
4029 static bool setup_cdrom(char *bsd_path
, Error
**errp
)
4031 int index
, num_of_test_partitions
= 2, fd
;
4032 char test_partition
[MAXPATHLEN
];
4033 bool partition_found
= false;
4035 /* look for a working partition */
4036 for (index
= 0; index
< num_of_test_partitions
; index
++) {
4037 snprintf(test_partition
, sizeof(test_partition
), "%ss%d", bsd_path
,
4039 fd
= qemu_open(test_partition
, O_RDONLY
| O_BINARY
| O_LARGEFILE
, NULL
);
4041 partition_found
= true;
4047 /* if a working partition on the device was not found */
4048 if (partition_found
== false) {
4049 error_setg(errp
, "Failed to find a working partition on disc");
4051 trace_file_setup_cdrom(test_partition
);
4052 pstrcpy(bsd_path
, MAXPATHLEN
, test_partition
);
4054 return partition_found
;
4057 /* Prints directions on mounting and unmounting a device */
4058 static void print_unmounting_directions(const char *file_name
)
4060 error_report("If device %s is mounted on the desktop, unmount"
4061 " it first before using it in QEMU", file_name
);
4062 error_report("Command to unmount device: diskutil unmountDisk %s",
4064 error_report("Command to mount device: diskutil mountDisk %s", file_name
);
4067 #endif /* defined(__APPLE__) && defined(__MACH__) */
4069 static int hdev_probe_device(const char *filename
)
4073 /* allow a dedicated CD-ROM driver to match with a higher priority */
4074 if (strstart(filename
, "/dev/cdrom", NULL
))
4077 if (stat(filename
, &st
) >= 0 &&
4078 (S_ISCHR(st
.st_mode
) || S_ISBLK(st
.st_mode
))) {
4085 static void hdev_parse_filename(const char *filename
, QDict
*options
,
4088 bdrv_parse_filename_strip_prefix(filename
, "host_device:", options
);
4091 static bool hdev_is_sg(BlockDriverState
*bs
)
4094 #if defined(__linux__)
4096 BDRVRawState
*s
= bs
->opaque
;
4098 struct sg_scsi_id scsiid
;
4102 if (stat(bs
->filename
, &st
) < 0 || !S_ISCHR(st
.st_mode
)) {
4106 ret
= ioctl(s
->fd
, SG_GET_VERSION_NUM
, &sg_version
);
4111 ret
= ioctl(s
->fd
, SG_GET_SCSI_ID
, &scsiid
);
4113 trace_file_hdev_is_sg(scsiid
.scsi_type
, sg_version
);
4122 static int hdev_open(BlockDriverState
*bs
, QDict
*options
, int flags
,
4125 BDRVRawState
*s
= bs
->opaque
;
4128 #if defined(__APPLE__) && defined(__MACH__)
4130 * Caution: while qdict_get_str() is fine, getting non-string types
4131 * would require more care. When @options come from -blockdev or
4132 * blockdev_add, its members are typed according to the QAPI
4133 * schema, but when they come from -drive, they're all QString.
4135 const char *filename
= qdict_get_str(options
, "filename");
4136 char bsd_path
[MAXPATHLEN
] = "";
4137 bool error_occurred
= false;
4139 /* If using a real cdrom */
4140 if (strcmp(filename
, "/dev/cdrom") == 0) {
4141 char *mediaType
= NULL
;
4142 kern_return_t ret_val
;
4143 io_iterator_t mediaIterator
= 0;
4145 mediaType
= FindEjectableOpticalMedia(&mediaIterator
);
4146 if (mediaType
== NULL
) {
4147 error_setg(errp
, "Please make sure your CD/DVD is in the optical"
4149 error_occurred
= true;
4150 goto hdev_open_Mac_error
;
4153 ret_val
= GetBSDPath(mediaIterator
, bsd_path
, sizeof(bsd_path
), flags
);
4154 if (ret_val
!= KERN_SUCCESS
) {
4155 error_setg(errp
, "Could not get BSD path for optical drive");
4156 error_occurred
= true;
4157 goto hdev_open_Mac_error
;
4160 /* If a real optical drive was not found */
4161 if (bsd_path
[0] == '\0') {
4162 error_setg(errp
, "Failed to obtain bsd path for optical drive");
4163 error_occurred
= true;
4164 goto hdev_open_Mac_error
;
4167 /* If using a cdrom disc and finding a partition on the disc failed */
4168 if (strncmp(mediaType
, kIOCDMediaClass
, 9) == 0 &&
4169 setup_cdrom(bsd_path
, errp
) == false) {
4170 print_unmounting_directions(bsd_path
);
4171 error_occurred
= true;
4172 goto hdev_open_Mac_error
;
4175 qdict_put_str(options
, "filename", bsd_path
);
4177 hdev_open_Mac_error
:
4179 if (mediaIterator
) {
4180 IOObjectRelease(mediaIterator
);
4182 if (error_occurred
) {
4186 #endif /* defined(__APPLE__) && defined(__MACH__) */
4188 s
->type
= FTYPE_FILE
;
4190 ret
= raw_open_common(bs
, options
, flags
, 0, true, errp
);
4192 #if defined(__APPLE__) && defined(__MACH__)
4194 filename
= bsd_path
;
4196 /* if a physical device experienced an error while being opened */
4197 if (strncmp(filename
, "/dev/", 5) == 0) {
4198 print_unmounting_directions(filename
);
4200 #endif /* defined(__APPLE__) && defined(__MACH__) */
4204 /* Since this does ioctl the device must be already opened */
4205 bs
->sg
= hdev_is_sg(bs
);
4210 #if defined(__linux__)
4211 static int coroutine_fn
4212 hdev_co_ioctl(BlockDriverState
*bs
, unsigned long int req
, void *buf
)
4214 BDRVRawState
*s
= bs
->opaque
;
4215 RawPosixAIOData acb
;
4223 if (req
== SG_IO
&& s
->pr_mgr
) {
4224 struct sg_io_hdr
*io_hdr
= buf
;
4225 if (io_hdr
->cmdp
[0] == PERSISTENT_RESERVE_OUT
||
4226 io_hdr
->cmdp
[0] == PERSISTENT_RESERVE_IN
) {
4227 return pr_manager_execute(s
->pr_mgr
, qemu_get_current_aio_context(),
4232 acb
= (RawPosixAIOData
) {
4234 .aio_type
= QEMU_AIO_IOCTL
,
4235 .aio_fildes
= s
->fd
,
4243 return raw_thread_pool_submit(handle_aiocb_ioctl
, &acb
);
4247 static coroutine_fn
int
4248 hdev_co_pdiscard(BlockDriverState
*bs
, int64_t offset
, int64_t bytes
)
4250 BDRVRawState
*s
= bs
->opaque
;
4255 raw_account_discard(s
, bytes
, ret
);
4258 return raw_do_pdiscard(bs
, offset
, bytes
, true);
4261 static coroutine_fn
int hdev_co_pwrite_zeroes(BlockDriverState
*bs
,
4262 int64_t offset
, int64_t bytes
, BdrvRequestFlags flags
)
4271 return raw_do_pwrite_zeroes(bs
, offset
, bytes
, flags
, true);
4274 static BlockDriver bdrv_host_device
= {
4275 .format_name
= "host_device",
4276 .protocol_name
= "host_device",
4277 .instance_size
= sizeof(BDRVRawState
),
4278 .bdrv_needs_filename
= true,
4279 .bdrv_probe_device
= hdev_probe_device
,
4280 .bdrv_parse_filename
= hdev_parse_filename
,
4281 .bdrv_file_open
= hdev_open
,
4282 .bdrv_close
= raw_close
,
4283 .bdrv_reopen_prepare
= raw_reopen_prepare
,
4284 .bdrv_reopen_commit
= raw_reopen_commit
,
4285 .bdrv_reopen_abort
= raw_reopen_abort
,
4286 .bdrv_co_create_opts
= bdrv_co_create_opts_simple
,
4287 .create_opts
= &bdrv_create_opts_simple
,
4288 .mutable_opts
= mutable_opts
,
4289 .bdrv_co_invalidate_cache
= raw_co_invalidate_cache
,
4290 .bdrv_co_pwrite_zeroes
= hdev_co_pwrite_zeroes
,
4292 .bdrv_co_preadv
= raw_co_preadv
,
4293 .bdrv_co_pwritev
= raw_co_pwritev
,
4294 .bdrv_co_flush_to_disk
= raw_co_flush_to_disk
,
4295 .bdrv_co_pdiscard
= hdev_co_pdiscard
,
4296 .bdrv_co_copy_range_from
= raw_co_copy_range_from
,
4297 .bdrv_co_copy_range_to
= raw_co_copy_range_to
,
4298 .bdrv_refresh_limits
= raw_refresh_limits
,
4299 .bdrv_co_io_plug
= raw_co_io_plug
,
4300 .bdrv_co_io_unplug
= raw_co_io_unplug
,
4301 .bdrv_attach_aio_context
= raw_aio_attach_aio_context
,
4303 .bdrv_co_truncate
= raw_co_truncate
,
4304 .bdrv_co_getlength
= raw_co_getlength
,
4305 .bdrv_co_get_info
= raw_co_get_info
,
4306 .bdrv_get_specific_info
= raw_get_specific_info
,
4307 .bdrv_co_get_allocated_file_size
= raw_co_get_allocated_file_size
,
4308 .bdrv_get_specific_stats
= hdev_get_specific_stats
,
4309 .bdrv_check_perm
= raw_check_perm
,
4310 .bdrv_set_perm
= raw_set_perm
,
4311 .bdrv_abort_perm_update
= raw_abort_perm_update
,
4312 .bdrv_probe_blocksizes
= hdev_probe_blocksizes
,
4313 .bdrv_probe_geometry
= hdev_probe_geometry
,
4315 /* generic scsi device */
4317 .bdrv_co_ioctl
= hdev_co_ioctl
,
4321 #if defined(CONFIG_BLKZONED)
4322 /* zone management operations */
4323 .bdrv_co_zone_report
= raw_co_zone_report
,
4324 .bdrv_co_zone_mgmt
= raw_co_zone_mgmt
,
4325 .bdrv_co_zone_append
= raw_co_zone_append
,
4329 #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
4330 static void cdrom_parse_filename(const char *filename
, QDict
*options
,
4333 bdrv_parse_filename_strip_prefix(filename
, "host_cdrom:", options
);
4336 static void cdrom_refresh_limits(BlockDriverState
*bs
, Error
**errp
)
4338 bs
->bl
.has_variable_length
= true;
4339 raw_refresh_limits(bs
, errp
);
4344 static int cdrom_open(BlockDriverState
*bs
, QDict
*options
, int flags
,
4347 BDRVRawState
*s
= bs
->opaque
;
4351 /* open will not fail even if no CD is inserted, so add O_NONBLOCK */
4352 return raw_open_common(bs
, options
, flags
, O_NONBLOCK
, true, errp
);
4355 static int cdrom_probe_device(const char *filename
)
4361 fd
= qemu_open(filename
, O_RDONLY
| O_NONBLOCK
, NULL
);
4365 ret
= fstat(fd
, &st
);
4366 if (ret
== -1 || !S_ISBLK(st
.st_mode
)) {
4370 /* Attempt to detect via a CDROM specific ioctl */
4371 ret
= ioctl(fd
, CDROM_DRIVE_STATUS
, CDSL_CURRENT
);
4381 static bool coroutine_fn
cdrom_co_is_inserted(BlockDriverState
*bs
)
4383 BDRVRawState
*s
= bs
->opaque
;
4386 ret
= ioctl(s
->fd
, CDROM_DRIVE_STATUS
, CDSL_CURRENT
);
4387 return ret
== CDS_DISC_OK
;
4390 static void coroutine_fn
cdrom_co_eject(BlockDriverState
*bs
, bool eject_flag
)
4392 BDRVRawState
*s
= bs
->opaque
;
4395 if (ioctl(s
->fd
, CDROMEJECT
, NULL
) < 0)
4396 perror("CDROMEJECT");
4398 if (ioctl(s
->fd
, CDROMCLOSETRAY
, NULL
) < 0)
4399 perror("CDROMEJECT");
4403 static void coroutine_fn
cdrom_co_lock_medium(BlockDriverState
*bs
, bool locked
)
4405 BDRVRawState
*s
= bs
->opaque
;
4407 if (ioctl(s
->fd
, CDROM_LOCKDOOR
, locked
) < 0) {
4409 * Note: an error can happen if the distribution automatically
4412 /* perror("CDROM_LOCKDOOR"); */
4416 static BlockDriver bdrv_host_cdrom
= {
4417 .format_name
= "host_cdrom",
4418 .protocol_name
= "host_cdrom",
4419 .instance_size
= sizeof(BDRVRawState
),
4420 .bdrv_needs_filename
= true,
4421 .bdrv_probe_device
= cdrom_probe_device
,
4422 .bdrv_parse_filename
= cdrom_parse_filename
,
4423 .bdrv_file_open
= cdrom_open
,
4424 .bdrv_close
= raw_close
,
4425 .bdrv_reopen_prepare
= raw_reopen_prepare
,
4426 .bdrv_reopen_commit
= raw_reopen_commit
,
4427 .bdrv_reopen_abort
= raw_reopen_abort
,
4428 .bdrv_co_create_opts
= bdrv_co_create_opts_simple
,
4429 .create_opts
= &bdrv_create_opts_simple
,
4430 .mutable_opts
= mutable_opts
,
4431 .bdrv_co_invalidate_cache
= raw_co_invalidate_cache
,
4433 .bdrv_co_preadv
= raw_co_preadv
,
4434 .bdrv_co_pwritev
= raw_co_pwritev
,
4435 .bdrv_co_flush_to_disk
= raw_co_flush_to_disk
,
4436 .bdrv_refresh_limits
= cdrom_refresh_limits
,
4437 .bdrv_co_io_plug
= raw_co_io_plug
,
4438 .bdrv_co_io_unplug
= raw_co_io_unplug
,
4439 .bdrv_attach_aio_context
= raw_aio_attach_aio_context
,
4441 .bdrv_co_truncate
= raw_co_truncate
,
4442 .bdrv_co_getlength
= raw_co_getlength
,
4443 .bdrv_co_get_allocated_file_size
= raw_co_get_allocated_file_size
,
4445 /* removable device support */
4446 .bdrv_co_is_inserted
= cdrom_co_is_inserted
,
4447 .bdrv_co_eject
= cdrom_co_eject
,
4448 .bdrv_co_lock_medium
= cdrom_co_lock_medium
,
4450 /* generic scsi device */
4451 .bdrv_co_ioctl
= hdev_co_ioctl
,
4453 #endif /* __linux__ */
4455 #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
4456 static int cdrom_open(BlockDriverState
*bs
, QDict
*options
, int flags
,
4459 BDRVRawState
*s
= bs
->opaque
;
4464 ret
= raw_open_common(bs
, options
, flags
, 0, true, errp
);
4469 /* make sure the door isn't locked at this time */
4470 ioctl(s
->fd
, CDIOCALLOW
);
4474 static int cdrom_probe_device(const char *filename
)
4476 if (strstart(filename
, "/dev/cd", NULL
) ||
4477 strstart(filename
, "/dev/acd", NULL
))
4482 static int cdrom_reopen(BlockDriverState
*bs
)
4484 BDRVRawState
*s
= bs
->opaque
;
4488 * Force reread of possibly changed/newly loaded disc,
4489 * FreeBSD seems to not notice sometimes...
4493 fd
= qemu_open(bs
->filename
, s
->open_flags
, NULL
);
4500 /* make sure the door isn't locked at this time */
4501 ioctl(s
->fd
, CDIOCALLOW
);
4505 static bool coroutine_fn
cdrom_co_is_inserted(BlockDriverState
*bs
)
4507 return raw_co_getlength(bs
) > 0;
4510 static void coroutine_fn
cdrom_co_eject(BlockDriverState
*bs
, bool eject_flag
)
4512 BDRVRawState
*s
= bs
->opaque
;
4517 (void) ioctl(s
->fd
, CDIOCALLOW
);
4520 if (ioctl(s
->fd
, CDIOCEJECT
) < 0)
4521 perror("CDIOCEJECT");
4523 if (ioctl(s
->fd
, CDIOCCLOSE
) < 0)
4524 perror("CDIOCCLOSE");
4530 static void coroutine_fn
cdrom_co_lock_medium(BlockDriverState
*bs
, bool locked
)
4532 BDRVRawState
*s
= bs
->opaque
;
4536 if (ioctl(s
->fd
, (locked
? CDIOCPREVENT
: CDIOCALLOW
)) < 0) {
4538 * Note: an error can happen if the distribution automatically
4541 /* perror("CDROM_LOCKDOOR"); */
4545 static BlockDriver bdrv_host_cdrom
= {
4546 .format_name
= "host_cdrom",
4547 .protocol_name
= "host_cdrom",
4548 .instance_size
= sizeof(BDRVRawState
),
4549 .bdrv_needs_filename
= true,
4550 .bdrv_probe_device
= cdrom_probe_device
,
4551 .bdrv_parse_filename
= cdrom_parse_filename
,
4552 .bdrv_file_open
= cdrom_open
,
4553 .bdrv_close
= raw_close
,
4554 .bdrv_reopen_prepare
= raw_reopen_prepare
,
4555 .bdrv_reopen_commit
= raw_reopen_commit
,
4556 .bdrv_reopen_abort
= raw_reopen_abort
,
4557 .bdrv_co_create_opts
= bdrv_co_create_opts_simple
,
4558 .create_opts
= &bdrv_create_opts_simple
,
4559 .mutable_opts
= mutable_opts
,
4561 .bdrv_co_preadv
= raw_co_preadv
,
4562 .bdrv_co_pwritev
= raw_co_pwritev
,
4563 .bdrv_co_flush_to_disk
= raw_co_flush_to_disk
,
4564 .bdrv_refresh_limits
= cdrom_refresh_limits
,
4565 .bdrv_co_io_plug
= raw_co_io_plug
,
4566 .bdrv_co_io_unplug
= raw_co_io_unplug
,
4567 .bdrv_attach_aio_context
= raw_aio_attach_aio_context
,
4569 .bdrv_co_truncate
= raw_co_truncate
,
4570 .bdrv_co_getlength
= raw_co_getlength
,
4571 .bdrv_co_get_allocated_file_size
= raw_co_get_allocated_file_size
,
4573 /* removable device support */
4574 .bdrv_co_is_inserted
= cdrom_co_is_inserted
,
4575 .bdrv_co_eject
= cdrom_co_eject
,
4576 .bdrv_co_lock_medium
= cdrom_co_lock_medium
,
4578 #endif /* __FreeBSD__ */
4580 #endif /* HAVE_HOST_BLOCK_DEVICE */
4582 static void bdrv_file_init(void)
4585 * Register all the drivers. Note that order is important, the driver
4586 * registered last will get probed first.
4588 bdrv_register(&bdrv_file
);
4589 #if defined(HAVE_HOST_BLOCK_DEVICE)
4590 bdrv_register(&bdrv_host_device
);
4592 bdrv_register(&bdrv_host_cdrom
);
4594 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
4595 bdrv_register(&bdrv_host_cdrom
);
4597 #endif /* HAVE_HOST_BLOCK_DEVICE */
4600 block_init(bdrv_file_init
);