/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#endif

struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* This function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

/* This function makes an I/O wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
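
/*
 * A minimal usage sketch of the throttling API above (illustrative only;
 * the ThrottleConfig contents are hypothetical and would normally be filled
 * in from user-supplied limits):
 *
 *     ThrottleConfig cfg = { ... };
 *     bdrv_io_limits_enable(bs);     // install timers; must not already be enabled
 *     bdrv_set_io_limits(bs, &cfg);  // apply limits, restart queued requests
 *     ...
 *     bdrv_io_limits_disable(bs);    // drain throttled queues, destroy state
 */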

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
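
/*
 * Illustrative example (hypothetical paths): with bs->filename
 * "/images/top.qcow2" and bs->backing_file "base.qcow2", path_combine()
 * yields "/images/base.qcow2"; an absolute or protocol-prefixed backing
 * file name is copied through unchanged.
 */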

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
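
/*
 * A hedged usage sketch (the format name, size and path are illustrative):
 * synchronous image creation from non-coroutine context, mirroring what
 * bdrv_append_temp_snapshot() below does for its temporary overlay:
 *
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1024 * 1024);
 *     Error *err = NULL;
 *     int ret = bdrv_create(drv, "/tmp/test.qcow2", opts, &err);
 *     free_option_parameters(opts);
 */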

int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
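
/*
 * For quick reference, the mode strings above map to flags as follows:
 *
 *     writethrough   (default; no flags set)
 *     writeback      BDRV_O_CACHE_WB
 *     off/none       BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *     directsync     BDRV_O_NOCACHE
 *     unsafe         BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 */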

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
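
/*
 * Worked example (flag values illustrative): a top layer opened with
 * BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_COPY_ON_READ hands bs->file
 * BDRV_O_RDWR | BDRV_O_PROTOCOL | BDRV_O_CACHE_WB | BDRV_O_UNMAP
 * (snapshot and copy-on-read are top-layer-only), while a backing file
 * additionally loses BDRV_O_RDWR via bdrv_backing_flags().
 */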

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is an indirect pointer to a QDict of options to pass to the block
 * drivers, or pointer to NULL for an empty set of options. If this function
 * takes ownership of the QDict reference, it will set *options to NULL;
 * otherwise, it will contain unused/unrecognized options after this function
 * returns. Then, the caller is responsible for freeing it. If it intends to
 * reuse the QDict, QINCREF() should be called beforehand.
 */
static int bdrv_file_open(BlockDriverState *bs, const char *filename,
                          QDict **options, int flags, Error **errp)
{
    BlockDriver *drv;
    const char *drvname;
    bool parse_filename = false;
    Error *local_err = NULL;
    int ret;

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(*options, "filename");
    } else if (filename && !qdict_haskey(*options, "filename")) {
        qdict_put(*options, "filename", qstring_from_str(filename));
        parse_filename = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(*options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(*options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, parse_filename);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        drv = NULL;
        error_setg(errp, "Must specify either driver or file");
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        } else {
            filename = qdict_get_str(*options, "filename");
        }
    }

    if (!drv->bdrv_file_open) {
        ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
        *options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    return 0;

fail:
    return ret;
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
    if (ret < 0) {
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }

    if (bs->backing_hd->file) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->backing_hd->file->filename);
    }

    /* Recalculate the BlockLimits with the backing file */
    bdrv_refresh_limits(bs);

free_exit:
    g_free(backing_filename);
    return ret;
}

/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
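
/*
 * Illustrative example (names hypothetical): with bdref_key "file", both
 * of the following flattened option sets select the protocol image for
 * bs->file:
 *
 *     { "file": "drive0" }                      (reference to an existing BDS)
 *     { "file.driver": "file",
 *       "file.filename": "/tmp/disk.img" }      (inline BlockdevRef)
 */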

void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    BlockDriver *bdrv_qcow2;
    QEMUOptionParameter *create_options;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err = NULL;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }
    total_size &= BDRV_SECTOR_MASK;

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    bdrv_qcow2 = bdrv_find_format("qcow2");
    create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                             NULL);

    set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);

    ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
    free_option_parameters(create_options);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new("", &error_abort);

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
}

static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
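
/*
 * Example (filename hypothetical): the pseudo-filename
 *     json:{"driver": "qcow2", "file.filename": "/tmp/test.qcow2"}
 * is parsed and flattened into the same options QDict as passing the
 * "driver" and "file.filename" options directly.
 */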

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new("", &error_abort);
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            ret = -EINVAL;
            goto fail;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(options, json_options, false);
        QDECREF(json_options);
        filename = NULL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    if (flags & BDRV_O_PROTOCOL) {
        assert(!drv);
        ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL,
                             &local_err);
        if (!ret) {
            drv = bs->drv;
            goto done;
        } else if (bs->drv) {
            goto close_and_fail;
        } else {
            goto fail;
        }
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }
    if (flags & BDRV_O_SNAPSHOT) {
        snapshot_flags = bdrv_temp_snapshot_flags(flags);
        flags = bdrv_backing_flags(flags);
    }

    assert(file == NULL);
    ret = bdrv_open_image(&file, filename, options, "file",
                          bdrv_inherited_flags(flags),
                          true, &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    if (!drv) {
        if (file) {
            ret = find_image_format(file, filename, &drv, &local_err);
        } else {
            error_setg(errp, "Must specify either driver or file");
            ret = -EINVAL;
            goto fail;
        }
    }

    if (!drv) {
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            goto close_and_fail;
        }
    }

done:
    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bs->device_name, entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
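
/*
 * A hedged usage sketch (path and flags illustrative): open an image with
 * an explicit format driver given via the options QDict, letting the block
 * layer open bs->file itself:
 *
 *     BlockDriverState *bs = NULL;
 *     Error *err = NULL;
 *     QDict *opts = qdict_new();
 *     qdict_put(opts, "driver", qstring_from_str("qcow2"));
 *     int ret = bdrv_open(&bs, "/tmp/test.qcow2", NULL, opts,
 *                         BDRV_O_RDWR | BDRV_O_CACHE_WB, NULL, &err);
 */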

typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
 * already performed, or alternatively may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}

/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}

/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
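
/*
 * A hedged sketch of the transactional variant (devices illustrative):
 *
 *     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs0, flags0);
 *     queue = bdrv_reopen_queue(queue, bs1, flags1);
 *     if (bdrv_reopen_multiple(queue, &err) < 0) {
 *         // every prepared entry has been rolled back via bdrv_reopen_abort()
 *     }
 */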

/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}

/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}

void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_close(bs);
    }
}

/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            bdrv_start_throttled_reqs(bs);
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
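
/*
 * Typical usage, as in bdrv_close() above: quiesce everything first, then
 * flush. A sketch:
 *
 *     bdrv_drain_all();   // wait for in-flight and throttled requests
 *     bdrv_flush_all();   // now flush guest-visible data to disk
 */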

/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists.
   Also, NUL-terminate the device_name to prevent a double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}

static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;
}

/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* The code needs to swap the node_name but simply swapping node_list won't
     * work so first remove the nodes from the graph list, do the swap then
     * insert them back if needed.
     */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
    }

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* insert the nodes back into the graph node list if needed */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
    }

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}

/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}

static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->guest_block_size = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}

void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}

/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}

#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors, length, backing_length;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = bdrv_getlength(bs->backing_hd);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible.  If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = bdrv_truncate(bs->backing_hd, length);
        if (ret < 0) {
            goto ro_cleanup;
        }
    }

    total_sectors = length >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = bdrv_read(bs, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = bdrv_write(bs->backing_hd, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        if (ret < 0) {
            goto ro_cleanup;
        }
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd) {
        bdrv_flush(bs->backing_hd);
    }

    ret = 0;
ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}

int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs             = bs,
        .offset         = offset,
        .bytes          = bytes,
        .is_write       = is_write,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    }
    return bdi.cluster_size;
}
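
/*
 * Worked example (numbers illustrative): with a 64 KiB cluster size,
 * c = 65536 / 512 = 128 sectors. A request covering sectors [100, 300)
 * (sector_num = 100, nb_sectors = 200) is widened to the cluster-aligned
 * region [0, 384): *cluster_sector_num = 0 and *cluster_nb_sectors = 384.
 */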

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /* the given region starts after the tracked region ends */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* the tracked region starts after the given region ends */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}
typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    bdrv_refresh_limits(new_top_bs);

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   int size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /*
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_size;
    int64_t ret, nb_sectors, sector_num = 0;
    int n;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }
    target_size /= BDRV_SECTOR_SIZE;

    for (;;) {
        nb_sectors = target_size - sector_num;
        if (nb_sectors <= 0) {
            return 0;
        }
        if (nb_sectors > INT_MAX) {
            nb_sectors = INT_MAX;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
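/*
 * Illustrative note (added): bdrv_pwrite_sync() is the kind of helper a
 * format driver's metadata update (e.g. a table write) would use when an
 * ordering barrier is required. With writethrough modes
 * (enable_write_cache == false) the extra bdrv_flush() is skipped because
 * every write has already been flushed by the write path itself.
 */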
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests. If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (max_nb_sectors > 0) {
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
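/*
 * Worked example (added for illustration): with align == 4096, a read of
 * bytes == 512 at offset == 4608 is widened to offset == 4096 and
 * bytes == ROUND_UP(512 + 512, 4096) == 4096; head_buf absorbs the leading
 * 512 bytes and tail_buf the trailing 3072, while the caller's qiov only
 * ever sees its own 512 bytes.
 */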
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector. num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);

    return ret;
}
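/*
 * Worked example (added for illustration): writing 512 bytes at
 * offset == 4608 with align == 4096 triggers the RMW path. The surrounding
 * 4096-byte block is read (head and tail both fall inside the same block),
 * the caller's 512 bytes are spliced in via local_qiov, and a single
 * aligned 4096-byte write goes to the driver. The request is marked
 * serialising so a concurrent overlapping write cannot slip in between
 * the read and the write back.
 */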
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}
/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}
/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}
/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}
int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}
const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;
    int count = 0;
    const char **formats = NULL;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        if (drv->format_name) {
            bool found = false;
            int i = count;
            while (formats && i && !found) {
                found = !strcmp(formats[--i], drv->format_name);
            }

            if (!found) {
                formats = g_realloc(formats, (count + 1) * sizeof(char *));
                formats[count++] = drv->format_name;
                it(opaque, drv->format_name);
            }
        }
    }
    g_free(formats);
}
/* This function is to find block backend bs */
BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

/* This function is to find a node in the bs graph */
BlockDriverState *bdrv_find_node(const char *node_name)
{
    BlockDriverState *bs;

    assert(node_name);

    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        if (!strcmp(node_name, bs->node_name)) {
            return bs;
        }
    }
    return NULL;
}

/* Put this QMP function here so it can access the static graph_bdrv_states. */
BlockDeviceInfoList *bdrv_named_nodes_list(void)
{
    BlockDeviceInfoList *list, *entry;
    BlockDriverState *bs;

    list = NULL;
    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = bdrv_block_device_info(bs);
        entry->next = list;
        list = entry;
    }

    return list;
}

BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp)
{
    BlockDriverState *bs = NULL;

    if (device) {
        bs = bdrv_find(device);
        if (bs) {
            return bs;
        }
    }

    if (node_name) {
        bs = bdrv_find_node(node_name);
        if (bs) {
            return bs;
        }
    }

    error_setg(errp, "Cannot find device=%s nor node_name=%s",
                     device ? device : "",
                     node_name ? node_name : "");
    return NULL;
}
BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, device_list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}

int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        int ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
    }

    return result;
}
int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}

bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.unallocated_blocks_are_zero;
    }

    return false;
}

bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t length;
    int64_t n;
    int64_t ret, ret2;

    length = bdrv_getlength(bs);
    if (length < 0) {
        return length;
    }

    if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t length2 = bdrv_getlength(bs2);
            if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, pnum);
        if (ret2 >= 0) {
            /* Ignore errors. This is just providing extra information, it
             * is useful but not necessary.
             */
            ret |= (ret2 & BDRV_BLOCK_ZERO);
        }
    }

    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
                                         data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            qemu_aio_wait();
        }
    }
    return data.ret;
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return (ret & BDRV_BLOCK_ALLOCATED);
}
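/*
 * Illustrative sketch (not part of the original file): scanning an image
 * for unallocated ranges with the status API:
 *
 *     int pnum;
 *     int64_t st = bdrv_get_block_status(bs, sector_num, nb_sectors, &pnum);
 *
 *     if (st >= 0 && !(st & BDRV_BLOCK_ALLOCATED)) {
 *         // sectors [sector_num, sector_num + pnum) come from the
 *         // backing file or read as zeroes
 *     }
 */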
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive). BASE can be NULL to check if the given
 * sector is allocated in any image of the chain. Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
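/*
 * Worked example (added for illustration): in the chain
 * base <- inter <- top, bdrv_is_allocated_above(top, base, ...) returns 1
 * as soon as either top or inter allocates the sector; sectors present
 * only in base itself are reported as unallocated, which is what commit
 * and stream style jobs rely on when deciding what data to copy.
 */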
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
        return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol. If it is
 * relative, it must be relative to the chain. So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}
int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
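/*
 * Illustrative sketch (not part of the original file): the AIO wrappers
 * return immediately and invoke the callback from a bottom half once the
 * underlying coroutine finishes, e.g.:
 *
 *     static void my_read_cb(void *opaque, int ret)  // hypothetical cb
 *     {
 *         // ret < 0 on error, 0 on success
 *     }
 *
 *     bdrv_aio_readv(bs, sector_num, qiov, nb_sectors, my_read_cb, NULL);
 */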
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}
static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We should not need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
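/*
 * Worked example (added for illustration): two writes covering sectors
 * [0, 8) and [8, 16) are sorted, found adjacent (reqs[1].sector == 8 ==
 * oldreq_last) and merged into one 16-sector request whose qiov
 * concatenates both vectors. An overlapping pair such as [0, 8) and
 * [4, 12) keeps only the first 4 sectors of the first request before
 * concatenating, yielding a merged request for [0, 12).
 */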
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}

void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)

{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
4850 static void coroutine_fn
bdrv_flush_co_entry(void *opaque
)
4852 RwCo
*rwco
= opaque
;
4854 rwco
->ret
= bdrv_co_flush(rwco
->bs
);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
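
/*
 * Added note: flush ordering recap. bdrv_co_flush() writes data back in
 * three stages: first to the OS (bdrv_co_flush_to_os), then, unless
 * BDRV_O_NO_FLUSH is set, to the disk (bdrv_co_flush_to_disk or the AIO
 * fallback above), and finally it recurses into bs->file so the
 * protocol layer is flushed too. For a format driver on top of a
 * protocol driver the sequence is roughly:
 *
 *     format:   flush_to_os -> flush_to_disk
 *     protocol: flush_to_os -> flush_to_disk   (via bdrv_co_flush(bs->file))
 */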
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    Error *local_err = NULL;
    int ret;

    if (!bs->drv) {
        return;
    }

    if (bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs, &local_err);
    } else if (bs->file) {
        bdrv_invalidate_cache(bs->file, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        return;
    }
}
void bdrv_invalidate_cache_all(Error **errp)
{
    BlockDriverState *bs;
    Error *local_err = NULL;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_invalidate_cache(bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
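
/*
 * Added note (sketch): bdrv_flush() shows the synchronous-wrapper
 * pattern used throughout this file (see also bdrv_discard() below):
 * when already in coroutine context, run the entry function directly;
 * otherwise spawn a coroutine and spin on the NOT_DONE sentinel. A
 * caller can therefore use it from either context:
 *
 *     int ret = bdrv_flush(bs);
 *     if (ret < 0) {
 *         // flush failed; -ret is an errno value
 *     }
 */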
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard;

    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockDriverAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* pass the current chunk (num), not the whole remaining
             * request, so the AIO path matches the splitting above */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}
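
/*
 * Added worked example (illustrative numbers): with
 * bs->bl.discard_alignment = 8 and a request of sector_num = 5,
 * nb_sectors = 20, the loop above issues two driver calls:
 *
 *     call 1: sector 5,  num = 8 - (5 % 8) = 3   (realign head to 8)
 *     call 2: sector 8,  num = 17                (rest, subject only to
 *                                                 the max_discard cap)
 */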
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return 0;
    }
    if (!drv->bdrv_is_inserted) {
        return 1;
    }
    return drv->bdrv_is_inserted(bs);
}
/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know. Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}
/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}
/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}
BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}
void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
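
/*
 * Added usage sketch: buffers obtained with qemu_blockalign() satisfy
 * this check (assuming the buffer length is also a multiple of the
 * device's memory alignment, as 4096 is for the common 512/4096-byte
 * cases), letting requests skip the bounce-buffer slow path:
 *
 *     void *buf = qemu_blockalign(bs, 4096);
 *     QEMUIOVector qiov;
 *
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, 4096);
 *     assert(bdrv_qiov_is_aligned(bs, &qiov));
 */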
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
                                          Error **errp)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = bdrv_getlength(bs);
    if (bitmap_size < 0) {
        error_setg_errno(errp, -bitmap_size, "could not get length of device");
        errno = -bitmap_size;
        return NULL;
    }
    bitmap_size >>= BDRV_SECTOR_BITS;
    bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}
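
/*
 * Added worked example: for granularity = 65536 bytes,
 * 65536 >> BDRV_SECTOR_BITS (9) = 128 sectors per bit and
 * ffs(128) - 1 = 7, so hbitmap_alloc() tracks 2^7 sectors per bit.
 * A 1 GiB device (2097152 sectors) then needs 2097152 / 128 = 16384
 * bits of dirty-bitmap state.
 */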
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;

    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
        BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));

        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}
int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
{
    if (bitmap) {
        return hbitmap_get(bitmap->bitmap, sector);
    } else {
        return 0;
    }
}
void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;

    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;

    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted.
 */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}
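
/*
 * Added usage sketch: a long-lived user such as a block job pins bs
 * with a reference and drops it when finished; the final unref deletes
 * the BlockDriverState:
 *
 *     bdrv_ref(bs);     // keep bs alive while the job runs
 *     ...               // use bs
 *     bdrv_unref(bs);   // may delete bs if this was the last reference
 */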
void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}
void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}
void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = (error == ENOSPC) ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                           BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
                enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}
void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
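
/*
 * Added usage sketch: device models bracket each request with these two
 * calls; the cookie carries the byte count, type and start timestamp so
 * bdrv_acct_done() can update the per-type totals:
 *
 *     BlockAcctCookie cookie;
 *
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     ...issue the read and wait for completion...
 *     bdrv_acct_done(bs, &cookie);
 */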
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    /* The size for the image must always be specified, with one exception:
     * If we are using a backing file, we can obtain the size from there. */
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            BlockDriverState *bs;
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = NULL;
            ret = bdrv_open(&bs, backing_file->value.s, NULL, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= BDRV_SECTOR_SIZE;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
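
/*
 * Added usage sketch (the flags value is an assumption; qemu-img passes
 * its own open flags here): creating a qcow2 overlay on top of a raw
 * base image, taking the size from the backing file, is roughly:
 *
 *     Error *err = NULL;
 *
 *     bdrv_img_create("overlay.qcow2", "qcow2",
 *                     "base.raw", "raw",       // backing file and format
 *                     NULL,                    // no -o option string
 *                     (uint64_t)-1,            // size from backing file
 *                     0, &err, false);         // flags, errp, quiet
 */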
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}
/* This function will be called by the bdrv_recurse_is_first_non_filter method
 * of block filter drivers and by bdrv_is_first_non_filter.
 * It is used to test if the given bs is the candidate or to recurse further
 * down the node graph.
 */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    /* return false if basic checks fail */
    if (!bs || !bs->drv) {
        return false;
    }

    /* the code reached a non block filter driver -> check if the bs is
     * the same as the candidate. This is the recursion termination condition.
     */
    if (!bs->drv->is_filter) {
        return bs == candidate;
    }
    /* Down this path the driver is a block filter driver */

    /* If the block filter recursion method is defined, use it to recurse down
     * the node graph.
     */
    if (bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    /* The driver is a block filter but does not allow recursion -> return
     * false.
     */
    return false;
}
/* This function checks if the candidate is the first non-filter bs down its
 * bs chain. Since we don't have pointers to parents, it explores all bs
 * chains from the top. Some filters can choose not to pass down the
 * recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        /* try to recurse in this top level bs */
        perm = bdrv_recurse_is_first_non_filter(bs, candidate);

        /* candidate is the first non filter */