/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#endif

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}
/* This function drains all the throttled I/O requests */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
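/*
 * Illustrative sketch (not part of the original source): how a caller might
 * enable and configure throttling on a BlockDriverState.  The ThrottleConfig
 * field names and THROTTLE_* indices below are assumptions based on the
 * qemu/throttle.h API of this era.
 */
#if 0
static void example_limit_to_one_mbps(BlockDriverState *bs)
{
    ThrottleConfig cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1 * 1024 * 1024; /* 1 MiB/s total */

    bdrv_io_limits_enable(bs);    /* must be called first */
    bdrv_set_io_limits(bs, &cfg); /* then install the limits */
}
#endif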
/* This function makes an I/O wait if needed
 *
 * @nb_sectors: the number of sectors of the I/O
 * @is_write:   is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     int nb_sectors,
                                     bool is_write)
{
    /* check if this I/O must wait */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state,
                     is_write,
                     nb_sectors * BDRV_SECTOR_SIZE);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
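/*
 * Example (illustrative, not in the original source): combining a backing
 * file name that is stored relative to the overlay's path.
 */
#if 0
static void example_path_combine(void)
{
    char dest[PATH_MAX];

    path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "base.qcow2");
    /* dest now holds "/images/base.qcow2"; an absolute or protocol-style
     * filename would have been copied through unchanged. */
}
#endif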
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}
BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}
int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (error_is_set(&cco.err)) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
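/*
 * Illustrative sketch (not part of the original source): creating an empty
 * 1 GiB image at the protocol level.  Error handling is abbreviated; the
 * option helpers follow the QEMUOptionParameter API used elsewhere in this
 * file.
 */
#if 0
static int example_create_file(const char *filename, Error **errp)
{
    BlockDriver *drv = bdrv_find_protocol(filename, true);
    QEMUOptionParameter *opts;
    int ret;

    if (!drv) {
        return -ENOENT;
    }
    opts = parse_option_parameters("", drv->create_options, NULL);
    set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1024 * 1024 * 1024);

    ret = bdrv_create_file(filename, opts, errp);
    free_option_parameters(opts);
    return ret;
}
#endif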
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
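/*
 * Example (illustrative, not in the original source): the text before the
 * first ':' selects the protocol driver, otherwise the "file" driver is the
 * fallback.
 */
#if 0
BlockDriver *drv;
drv = bdrv_find_protocol("nbd://localhost:10809/disk", true); /* "nbd" driver */
drv = bdrv_find_protocol("/var/images/disk.img", true);       /* "file" driver */
#endif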
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
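/*
 * Illustrative example (not in the original source): mapping a "cache=..."
 * command-line value onto open flags.
 */
#if 0
static int example_apply_cache_mode(int *flags)
{
    /* "none" selects O_DIRECT-style access with a writeback cache model */
    if (bdrv_parse_cache_flags("none", flags) != 0) {
        return -1; /* unknown cache mode */
    }
    /* *flags now has BDRV_O_NOCACHE | BDRV_O_CACHE_WB set */
    return 0;
}
#endif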
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->buffer_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        assert(bs->filename[0] != '\0');
        unlink(bs->filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/**
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   QDict *options, int flags, Error **errp)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs = bdrv_new("");
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    } else if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        ret = -EINVAL;
        goto fail;
    }

    ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    bdrv_unref(bs);
    return ret;
}
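/*
 * Illustrative sketch (not part of the original source): opening a file at
 * the protocol level.  On success *file refers to an anonymous, growable
 * BlockDriverState, as described above.
 */
#if 0
static int example_open_protocol(const char *filename, Error **errp)
{
    BlockDriverState *file = NULL;
    int ret;

    ret = bdrv_file_open(&file, filename, NULL /* options */,
                         BDRV_O_RDWR, errp);
    if (ret < 0) {
        return ret;
    }
    /* ... use the file ... */
    bdrv_unref(file);
    return 0;
}
#endif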
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling this function.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    bs->backing_hd = bdrv_new("");

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT |
                                    BDRV_O_COPY_ON_READ);

    ret = bdrv_open(bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, options,
                    back_flags, back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return ret;
    }
    pstrcpy(bs->backing_file, sizeof(bs->backing_file),
            bs->backing_hd->file->filename);
    return 0;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    QDict *file_options = NULL;
    const char *drvname;
    Error *local_err = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        char backing_filename[PATH_MAX];

        if (qdict_size(options) != 0) {
            error_setg(errp, "Can't use snapshot=on with driver-specific options");
            ret = -EINVAL;
            goto fail;
        }
        assert(filename != NULL);

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, NULL, 0, drv, &local_err);
        if (ret < 0) {
            bdrv_unref(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        /* Real path is meaningless for protocols */
        if (path_has_protocol(filename)) {
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        } else if (!realpath(filename, backing_filename)) {
            ret = -errno;
            error_setg_errno(errp, errno, "Could not resolve path '%s'", filename);
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(create_options, BLOCK_OPT_BACKING_FILE,
                             backing_filename);
        if (drv) {
            set_option_parameter(create_options, BLOCK_OPT_BACKING_FMT,
                                 drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    qdict_extract_subqdict(options, &file_options, "file.");

    ret = bdrv_file_open(&file, filename, file_options,
                         bdrv_open_flags(bs, flags | BDRV_O_UNMAP), &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto unlink_and_fail;
        }
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (bs->file != file) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);
        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
 * already performed, or alternatively may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be part of the same transaction.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}

/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
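/*
 * Illustrative example (not in the original source): temporarily making a
 * backing file writable and restoring its flags afterwards, the same pattern
 * bdrv_commit() uses below.
 */
#if 0
static int example_reopen_rdwr(BlockDriverState *bs)
{
    int orig_flags = bs->open_flags;
    int ret;

    ret = bdrv_reopen(bs, orig_flags | BDRV_O_RDWR, NULL);
    if (ret < 0) {
        return ret;
    }
    /* ... write to bs ... */
    return bdrv_reopen(bs, orig_flags & ~BDRV_O_RDWR, NULL);
}
#endif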
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
}
/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (bdrv_start_throttled_reqs(bs)) {
                busy = true;
            }
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
/* make a BlockDriverState anonymous by removing from bdrv_state list.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}
static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
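/*
 * Illustrative sketch (not part of the original source): the external
 * snapshot pattern.  A freshly created, still anonymous overlay is opened
 * and then spliced on top of the active layer with bdrv_append(), so the
 * guest device keeps pointing at the same BlockDriverState.
 */
#if 0
static int example_take_external_snapshot(BlockDriverState *active,
                                          const char *overlay_filename,
                                          Error **errp)
{
    BlockDriverState *new_bs = bdrv_new(""); /* must stay anonymous */
    int ret;

    ret = bdrv_open(new_bs, overlay_filename, NULL,
                    BDRV_O_RDWR | BDRV_O_NO_BACKING, NULL, errp);
    if (ret < 0) {
        bdrv_unref(new_bs);
        return ret;
    }
    bdrv_append(new_bs, active);
    return 0;
}
#endif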
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}
void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}
/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}
static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}
bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}
void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}
bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}
static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}
bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
*req
,
2021 int64_t sector_num
, int nb_sectors
) {
2023 if (sector_num
>= req
->sector_num
+ req
->nb_sectors
) {
2027 if (req
->sector_num
>= sector_num
+ nb_sectors
) {
static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}
typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
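/*
 * Illustrative note (not in the original source): for a non-growable 1 MiB
 * image, a 2-sector request starting at the last sector fails the byte
 * check, because 2047 * 512 + 1024 extends past the 1048576-byte end.
 */
#if 0
/* fails with -EIO on a non-growable 1 MiB image */
ret = bdrv_check_request(bs, 2047, 2);
#endif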
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov,
                                     rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov,
                                      rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_rwv_co(BlockDriverState *bs, int64_t sector_num,
                       QEMUIOVector *qiov, bool is_write,
                       BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = qiov->size >> BDRV_SECTOR_BITS,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };
    assert((qiov->size & (BDRV_SECTOR_SIZE - 1)) == 0);

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_rwv_co(bs, sector_num, &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}
/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov)
{
    return bdrv_rwv_co(bs, sector_num, qiov, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
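/*
 * Illustrative example (not in the original source): the synchronous
 * wrappers above are sector-based, so a 4 KiB read at the start of the
 * image reads 8 sectors of BDRV_SECTOR_SIZE (512) bytes.  Callers on the
 * guest I/O path should prefer the asynchronous or coroutine interfaces.
 */
#if 0
static int example_sync_read(BlockDriverState *bs)
{
    uint8_t buf[8 * BDRV_SECTOR_SIZE]; /* 8 sectors = 4 KiB */

    return bdrv_read(bs, 0, buf, 8);
}
#endif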
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = qiov->size;

    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, 0, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)),
                          len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return qiov->size;
        sector_num++;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        QEMUIOVector qiov_inplace;

        qemu_iovec_init(&qiov_inplace, qiov->niov);
        qemu_iovec_concat(&qiov_inplace, qiov, len,
                          nb_sectors << BDRV_SECTOR_BITS);
        ret = bdrv_writev(bs, sector_num, &qiov_inplace);
        qemu_iovec_destroy(&qiov_inplace);

        if (ret < 0) {
            return ret;
        }

        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, qiov->size - count, tmp_buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }

    return qiov->size;
}
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count1)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len  = count1,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
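/* Illustrative sketch (not part of the original file): a format driver that
 * must not let a header update be reordered after later metadata writes can
 * route the header write through bdrv_pwrite_sync().  ExampleHeader and the
 * offset are hypothetical. */
typedef struct ExampleHeader {
    uint32_t magic;
    uint32_t dirty;
} ExampleHeader;

static int example_clear_dirty_flag(BlockDriverState *bs)
{
    ExampleHeader header;
    int ret = bdrv_pread(bs, 0, &header, sizeof(header));
    if (ret < 0) {
        return ret;
    }
    header.dirty = 0;
    /* the flush inside bdrv_pwrite_sync() guarantees the cleared flag is on
     * disk before any write submitted after this call */
    return bdrv_pwrite_sync(bs, 0, &header, sizeof(header));
}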
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
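/* Worked example (illustrative, assuming 64 KiB clusters = 128 sectors): a
 * copy-on-read of sectors [130, 140) is widened by bdrv_round_to_clusters()
 * to the containing cluster range [128, 256), so the image file allocates
 * whole clusters and needs no further backing-file I/O for that range;
 * skip_bytes is then (130 - 128) * 512 = 1024 when copying back into the
 * caller's qiov. */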
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, false);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = MAX(0, total_sectors - sector_num);
        if (max_nb_sectors > 0) {
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                             BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}
int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.write_zeroes_alignment &&
            num >= bs->bl.write_zeroes_alignment &&
            sector_num % bs->bl.write_zeroes_alignment) {
            if (num > bs->bl.write_zeroes_alignment) {
                num = bs->bl.write_zeroes_alignment;
            }
            num -= sector_num % bs->bl.write_zeroes_alignment;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                /* allocate bounce buffer only once and ensure that it
                 * is big enough for this and all future requests.
                 */
                size_t bufsize = num <= nb_sectors ? num : max_write_zeroes;
                iov.iov_base = qemu_blockalign(bs, bufsize * BDRV_SECTOR_SIZE);
                memset(iov.iov_base, 0, bufsize * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
        }

        sector_num += num;
        nb_sectors -= num;
    }

    qemu_vfree(iov.iov_base);
    return ret;
}
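/* Worked example (illustrative): with bs->bl.write_zeroes_alignment = 8, a
 * request for sectors [5, 29) is issued in two pieces: first [5, 8) (num is
 * cut to 8 - 5 % 8 = 3 to reach alignment), then the aligned remainder
 * [8, 29).  A driver returning -ENOTSUP at any step makes the loop fall back
 * to writing an explicit zeroed bounce buffer for that piece. */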
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, true);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    if (bs->dirty_bitmap) {
        bdrv_set_dirty(bs, sector_num, nb_sectors);
    }

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    tracked_request_end(&req);

    return ret;
}
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}
int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}
/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}
/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}
/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}
BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read,
                                       int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}
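/* Illustrative sketch (hypothetical device-model helper, mirroring how
 * device emulation uses this pair of functions): map a failed write to an
 * action, then let the block layer emit the QMP event and stop the VM if
 * required. */
static void example_handle_write_error(BlockDriverState *bs, int error)
{
    BlockErrorAction action = bdrv_get_error_action(bs, false, error);

    /* device-specific bookkeeping (e.g. requeuing the request) goes here */
    bdrv_error_action(bs, action, false, error);
}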
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}
int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;

    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}
const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}
BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        it(opaque, bs);
    }
}
const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}

int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
    }

    return result;
}
int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}

bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.unallocated_blocks_are_zero;
    }

    return false;
}

bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t length;
    int64_t n;
    int64_t ret, ret2;

    length = bdrv_getlength(bs);
    if (length < 0) {
        return length;
    }

    if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (!(ret & BDRV_BLOCK_DATA)) {
        if (bdrv_has_zero_init(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t length2 = bdrv_getlength(bs2);
            if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            ret |= (ret2 & BDRV_BLOCK_ZERO);
        }
    }

    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num,
                                         data->nb_sectors, data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            qemu_aio_wait();
        }
    }
    return data.ret;
}
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return
        (ret & BDRV_BLOCK_DATA) ||
        ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
}
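/* Illustrative sketch (not part of the original file): walking an image's
 * allocation map in pnum-sized chunks with bdrv_is_allocated(). */
static void example_dump_allocation(BlockDriverState *bs)
{
    int64_t total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    int64_t sector_num = 0;

    while (sector_num < total_sectors) {
        int pnum;
        int ret = bdrv_is_allocated(bs, sector_num,
                                    MIN(total_sectors - sector_num, 65536),
                                    &pnum);
        if (ret < 0 || pnum == 0) {
            break;
        }
        printf("[%" PRId64 ", %" PRId64 "): %s\n",
               sector_num, sector_num + pnum,
               ret ? "allocated" : "unallocated");
        sector_num += pnum;
    }
}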
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(!bs->dirty_bitmap);

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}

ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}
int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}
int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the
             * current image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing
             * against is relative to the current image filename (or
             * absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}
int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error, this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that
 * the callback will be called for some of the requests, for others it won't.
 * The caller must check the error field of the BlockRequest to wait for the
 * right callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
                        reqs[i].nb_sectors, multiwrite_cb, mcb);
    }

    return 0;
}
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)

{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
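/* Summary of how the cache modes steer this function (illustrative):
 * cache=writethrough clears bs->enable_write_cache, so bdrv_co_do_writev()
 * already flushes after every write; cache=none and cache=writeback reach
 * both the flush-to-OS and flush-to-disk steps above; cache=unsafe sets
 * BDRV_O_NO_FLUSH, which still writes back to the OS but skips the disk
 * flush and only recurses to the protocol layer. */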
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    if (bs->dirty_bitmap) {
        bdrv_reset_dirty(bs, sector_num, nb_sectors);
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (bs->drv->bdrv_co_discard) {
        int max_discard = bs->bl.max_discard ?
                          bs->bl.max_discard : MAX_DISCARD_DEFAULT;

        while (nb_sectors > 0) {
            int ret;
            int num = nb_sectors;

            /* align request */
            if (bs->bl.discard_alignment &&
                num >= bs->bl.discard_alignment &&
                sector_num % bs->bl.discard_alignment) {
                if (num > bs->bl.discard_alignment) {
                    num = bs->bl.discard_alignment;
                }
                num -= sector_num % bs->bl.discard_alignment;
            }

            /* limit request size */
            if (num > max_discard) {
                num = max_discard;
            }

            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
            if (ret) {
                return ret;
            }

            sector_num += num;
            nb_sectors -= num;
        }
        return 0;
    } else if (bs->drv->bdrv_aio_discard) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                        bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    } else {
        return 0;
    }
}
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return 0;
    if (!drv->bdrv_is_inserted)
        return 1;
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}
void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
{
    bs->buffer_alignment = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign((bs && bs->buffer_alignment) ?
                         bs->buffer_alignment : 512, size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
            return false;
        }
    }

    return true;
}
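/* Illustrative sketch (not part of the original file): building a vector
 * that satisfies bs->buffer_alignment (needed e.g. for O_DIRECT files), so
 * that bdrv_qiov_is_aligned() holds for it. */
static QEMUIOVector *example_alloc_aligned_qiov(BlockDriverState *bs,
                                                size_t len)
{
    QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
    void *buf = qemu_blockalign(bs, len);

    qemu_iovec_init(qiov, 1);
    qemu_iovec_add(qiov, buf, len);
    assert(bdrv_qiov_is_aligned(bs, qiov));
    return qiov;
}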
void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;

    assert((granularity & (granularity - 1)) == 0);

    if (granularity) {
        granularity >>= BDRV_SECTOR_BITS;
        assert(!bs->dirty_bitmap);
        bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
        bs->dirty_bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    } else {
        if (bs->dirty_bitmap) {
            hbitmap_free(bs->dirty_bitmap);
            bs->dirty_bitmap = NULL;
        }
    }
}

int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
    if (bs->dirty_bitmap) {
        return hbitmap_get(bs->dirty_bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bs->dirty_bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    hbitmap_set(bs->dirty_bitmap, cur_sector, nr_sectors);
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
{
    hbitmap_reset(bs->dirty_bitmap, cur_sector, nr_sectors);
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs)
{
    if (bs->dirty_bitmap) {
        return hbitmap_count(bs->dirty_bitmap);
    } else {
        return 0;
    }
}
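/* Illustrative sketch (as block migration and mirroring do): after enabling
 * tracking with bdrv_set_dirty_tracking(), walk the dirty bitmap with an
 * HBitmapIter; each value returned is the first dirty sector of a
 * granularity-sized chunk. */
static int64_t example_count_dirty_chunks(BlockDriverState *bs)
{
    HBitmapIter hbi;
    int64_t count = 0;

    bdrv_dirty_iter_init(bs, &hbi);
    while (hbitmap_iter_next(&hbi) >= 0) {
        count++;
    }
    return count;
}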
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}

void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
                enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
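/* Illustrative sketch (as device emulation does around guest requests):
 * bracket an I/O with bdrv_acct_start()/bdrv_acct_done() so it shows up in
 * query-blockstats. */
static int example_accounted_read(BlockDriverState *bs, int64_t sector_num,
                                  uint8_t *buf, int nb_sectors)
{
    BlockAcctCookie cookie;
    int ret;

    bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
                    BDRV_ACCT_READ);
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    if (ret >= 0) {
        bdrv_acct_done(bs, &cookie);
    }
    return ret;
}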
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_unref(bs);
    }
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
}
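/* Illustrative sketch: creating a 1 GiB qcow2 image programmatically, the
 * same path qemu-img create takes; on failure errp carries the message. */
static void example_create_image(const char *filename, Error **errp)
{
    bdrv_img_create(filename, "qcow2",
                    NULL, NULL,         /* no backing file / backing format */
                    NULL,               /* no extra -o options */
                    1024 * 1024 * 1024, /* 1 GiB */
                    BDRV_O_RDWR, errp, true /* quiet */);
}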
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}

ExtSnapshotPerm bdrv_check_ext_snapshot(BlockDriverState *bs)
{
    if (bs->drv->bdrv_check_ext_snapshot) {
        return bs->drv->bdrv_check_ext_snapshot(bs);
    }

    if (bs->file && bs->file->drv && bs->file->drv->bdrv_check_ext_snapshot) {
        return bs->file->drv->bdrv_check_ext_snapshot(bs);
    }

    /* external snapshots are allowed by default */
    return EXT_SNAPSHOT_ALLOWED;
}

ExtSnapshotPerm bdrv_check_ext_snapshot_forbidden(BlockDriverState *bs)
{
    return EXT_SNAPSHOT_FORBIDDEN;
}