/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#endif

#ifdef _WIN32
#include <windows.h>
#endif
struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);
/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}
/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
/* This function makes an I/O wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
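/* Illustrative sketch (not part of the original file): how a caller might
 * enable and configure throttling.  Field names follow util/throttle.h; the
 * 1 MB/s figure is an arbitrary example value. */
#if 0
static void example_limit_to_1mbps(BlockDriverState *bs)
{
    ThrottleConfig cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1024 * 1024; /* bytes per second */

    bdrv_io_limits_enable(bs);    /* must precede bdrv_set_io_limits() */
    bdrv_set_io_limits(bs, &cfg);
}
#endif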
size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
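/* Example (illustrative only): combining a relative backing file name with
 * the path of the image that references it:
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "base.raw");
 *     // dest now contains "/images/base.raw"
 *
 * An absolute filename would be copied through unchanged. */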
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}
BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;
static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}
int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (error_is_set(&cco.err)) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    int ret;
    Error *local_err = NULL;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
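/* Illustrative sketch (not part of the original file): creating an image
 * through the protocol layer.  Passing NULL options leaves all creation
 * parameters at their driver defaults; error handling is abbreviated. */
#if 0
static void example_create_file(void)
{
    Error *err = NULL;

    if (bdrv_create_file("/tmp/test.img", NULL, &err) < 0) {
        fprintf(stderr, "%s\n", error_get_pretty(err));
        error_free(err);
    }
}
#endif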
int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
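/* Example (illustrative only): callers typically pair get_tmp_filename()
 * with image creation, as bdrv_open() does for snapshot=on:
 *
 *     char tmp[PATH_MAX + 1];
 *     if (get_tmp_filename(tmp, sizeof(tmp)) < 0) {
 *         ... report the error ...
 *     }
 *     // tmp now names an existing, empty, uniquely-named file
 */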
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
*bdrv_find_protocol(const char *filename
,
590 bool allow_protocol_prefix
)
597 /* TODO Drivers without bdrv_file_open must be specified explicitly */
600 * XXX(hch): we really should not let host device detection
601 * override an explicit protocol specification, but moving this
602 * later breaks access to device names with colons in them.
603 * Thanks to the brain-dead persistent naming schemes on udev-
604 * based Linux systems those actually are quite common.
606 drv1
= find_hdev_driver(filename
);
611 if (!path_has_protocol(filename
) || !allow_protocol_prefix
) {
612 return bdrv_find_format("file");
615 p
= strchr(filename
, ':');
618 if (len
> sizeof(protocol
) - 1)
619 len
= sizeof(protocol
) - 1;
620 memcpy(protocol
, filename
, len
);
621 protocol
[len
] = '\0';
622 QLIST_FOREACH(drv1
, &bdrv_drivers
, list
) {
623 if (drv1
->protocol_name
&&
624 !strcmp(drv1
->protocol_name
, protocol
)) {
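/* Examples of the resulting dispatch (illustrative, assuming the usual
 * built-in drivers are registered):
 *
 *     bdrv_find_protocol("/dev/cdrom", true)          -> host CDROM driver
 *     bdrv_find_protocol("nbd:localhost:10809", true) -> "nbd" driver
 *     bdrv_find_protocol("test.qcow2", true)          -> "file" driver
 *     bdrv_find_protocol("nbd:host", false)           -> "file" driver
 *         (the prefix is ignored when allow_protocol_prefix is false)
 */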
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
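/* Summary of the mapping above (flag names as used in this file):
 *
 *   "writethrough" -> no flags (the default)
 *   "writeback"    -> BDRV_O_CACHE_WB
 *   "none"/"off"   -> BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *   "directsync"   -> BDRV_O_NOCACHE
 *   "unsafe"       -> BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 */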
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
static int bdrv_assign_node_name(BlockDriverState *bs,
                                 const char *node_name,
                                 Error **errp)
{
    if (!node_name) {
        return 0;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return -EINVAL;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return -EINVAL;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);

    return 0;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    ret = bdrv_assign_node_name(bs, node_name, errp);
    if (ret < 0) {
        return ret;
    }
    qdict_del(options, "node-name");

    /* Handles bdrv_open() directly using a protocol as drv. This layer is
     * already opened, so assign it to bs (while file becomes a closed
     * BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert(bs->request_alignment != 0);

#ifndef _WIN32
    if (bs->is_temporary) {
        assert(bs->filename[0] != '\0');
        unlink(bs->filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   const char *reference, QDict *options, int flags,
                   Error **errp)
{
    BlockDriverState *bs = NULL;
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    if (reference) {
        if (filename || qdict_size(options)) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }
        QDECREF(options);

        bs = bdrv_find(reference);
        if (!bs) {
            error_setg(errp, "Cannot find block device '%s'", reference);
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    bs = bdrv_new("");
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    } else if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        ret = -EINVAL;
        goto fail;
    }

    if (!drv->bdrv_file_open) {
        ret = bdrv_open(bs, filename, options, flags, drv, &local_err);
        options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    bdrv_unref(bs);
    return ret;
}
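/* Illustrative sketch (not part of the original file): opening an image
 * through the protocol layer only, bypassing format probing. */
#if 0
static BlockDriverState *example_open_protocol(Error **errp)
{
    BlockDriverState *bs = NULL;

    if (bdrv_file_open(&bs, "/tmp/test.img", NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_CACHE_WB, errp) < 0) {
        return NULL;
    }
    return bs;
}
#endif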
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    bs->backing_hd = bdrv_new("");

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT |
                                    BDRV_O_COPY_ON_READ);

    ret = bdrv_open(bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, options,
                    back_flags, back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return ret;
    }

    if (bs->backing_hd->file) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->backing_hd->file->filename);
    }

    /* Recalculate the BlockLimits with the backing file */
    bdrv_refresh_limits(bs);

    return 0;
}
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If force_raw is true, bdrv_file_open() will be used, thereby preventing any
 * image format auto-detection. If it is false and a filename is given,
 * bdrv_open() will be used for auto-detection.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool force_raw, bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        goto done;
    }

    if (filename && !force_raw) {
        /* If a filename is given and the block driver should be detected
           automatically (instead of using none), use bdrv_open() in order to do
           that auto-detection. */
        BlockDriverState *bs;

        if (reference) {
            error_setg(errp, "Cannot reference an existing block device while "
                       "giving a filename");
            ret = -EINVAL;
            goto done;
        }

        bs = bdrv_new("");
        ret = bdrv_open(bs, filename, image_options, flags, NULL, errp);
        if (ret < 0) {
            bdrv_unref(bs);
        } else {
            *pbs = bs;
        }
    } else {
        ret = bdrv_file_open(pbs, filename, reference, image_options, flags,
                             errp);
    }

done:
    qdict_del(options, bdref_key);
    return ret;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    const char *drvname;
    Error *local_err = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        QDict *snapshot_options;

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* Get the required size from the image */
        bs1 = bdrv_new("");
        QINCREF(options);
        ret = bdrv_open(bs1, filename, options, BDRV_O_NO_BACKING,
                        drv, &local_err);
        if (ret < 0) {
            bdrv_unref(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        /* Create the temporary image */
        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        /* Prepare a new options QDict for the temporary file, where user
         * options refer to the backing file */
        if (filename) {
            qdict_put(options, "file.filename", qstring_from_str(filename));
        }
        if (drv) {
            qdict_put(options, "driver", qstring_from_str(drv->format_name));
        }

        snapshot_options = qdict_new();
        qdict_put(snapshot_options, "backing", options);
        qdict_flatten(snapshot_options);

        bs->options = snapshot_options;
        options = qdict_clone_shallow(bs->options);

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    ret = bdrv_open_image(&file, filename, options, "file",
                          bdrv_open_flags(bs, flags | BDRV_O_UNMAP), true, true,
                          &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        if (file) {
            ret = find_image_format(file, filename, &drv, &local_err);
        } else {
            error_setg(errp, "Must specify either driver or file");
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);
        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
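/* Illustrative sketch (not part of the original file): a typical caller opens
 * an image with an explicit driver option instead of format probing. */
#if 0
static int example_open_qcow2(BlockDriverState *bs, Error **errp)
{
    QDict *opts = qdict_new();

    qdict_put(opts, "driver", qstring_from_str("qcow2"));
    /* The options reference is consumed even on failure, so no QDECREF
     * here unless the caller wants to reuse the dictionary. */
    return bdrv_open(bs, "/tmp/test.qcow2", opts, BDRV_O_RDWR, NULL, errp);
}
#endif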
typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;
/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or alternatively may be NULL, in which
 * case a new BlockReopenQueue will be created and initialized. This newly
 * created BlockReopenQueue should be passed back in for subsequent calls
 * that are intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    ret = 0;
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
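/* Example (illustrative only): bdrv_commit() below uses exactly this pattern
 * to temporarily upgrade a read-only backing file to read-write:
 *
 *     bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL);
 *     ... write to the backing file ...
 *     bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
 */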
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs);
}
/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_close(bs);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            bdrv_start_throttled_reqs(bs);
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists.
 * Also, NUL-terminate the device_name to prevent a double remove. */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}
static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;

    /* keep the same entry in graph_bdrv_states
     * We do want to swap name but don't want to swap linked list entries
     */
    bs_dest->node_list   = bs_src->node_list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap! */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
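/* Example (illustrative only): given the chain
 *
 *     base <- top        (top is attached to the device)
 *
 * bdrv_append(overlay, top) inserts a freshly opened, anonymous 'overlay'
 * so that the chain becomes
 *
 *     base <- top <- overlay
 *
 * while the device keeps pointing at the same BlockDriverState object,
 * whose contents are now those of the overlay.  Live snapshots are built
 * on this primitive. */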
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}
/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}
void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->guest_block_size = 512;
}
/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}
void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}
static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}
bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}
void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}
bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}
static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}
bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors, length, backing_length;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = bdrv_getlength(bs->backing_hd);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible.  If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = bdrv_truncate(bs->backing_hd, length);
        if (ret < 0) {
            goto ro_cleanup;
        }
    }

    total_sectors = length >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = bdrv_read(bs, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = bdrv_write(bs->backing_hd, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        if (ret < 0) {
            goto ro_cleanup;
        }
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd) {
        bdrv_flush(bs->backing_hd);
    }

    ret = 0;
ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
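/* Example (illustrative only): after bdrv_commit(bs) succeeds on the chain
 *
 *     base.raw <- overlay.qcow2      (bs == overlay.qcow2)
 *
 * every sector that was allocated in overlay.qcow2 has been copied into
 * base.raw, and the overlay has been emptied via bdrv_make_empty() where
 * the driver supports it. */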
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs             = bs,
        .offset         = offset,
        .bytes          = bytes,
        .is_write       = is_write,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, size_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                      - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /* request region entirely after the tracked region? */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* request region entirely before the tracked region? */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}
{
2417 BlockDriverState
*bs
;
2418 QSIMPLEQ_ENTRY(BlkIntermediateStates
) entry
;
2419 } BlkIntermediateStates
;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    bdrv_refresh_limits(new_top_bs);

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
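/* A minimal usage sketch (not part of the original file): collapsing the
 * middle of a backing chain with bdrv_drop_intermediate().  "active", "top"
 * and "base" are hypothetical, already-opened members of one chain, with
 * "active" topmost; on success everything above "base" up to and including
 * "top" has been dropped and unreferenced:
 *
 *     if (bdrv_drop_intermediate(active, top, base) < 0) {
 *         error_report("failed to drop intermediate images");
 *     }
 */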
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   int size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
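/* Usage sketch (illustration only, not part of the original file): the
 * synchronous sector API above dispatches into a coroutine internally, so a
 * caller needs no coroutine awareness.  "bs" is a hypothetical, already-opened
 * BlockDriverState:
 *
 *     uint8_t sector[BDRV_SECTOR_SIZE];
 *
 *     if (bdrv_read(bs, 0, sector, 1) < 0) {      // read sector 0
 *         // handle -EIO/-ENOMEDIUM/...
 *     }
 *     if (bdrv_write(bs, 0, sector, 1) < 0) {     // write it back
 *         // handle error
 *     }
 */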
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
    int64_t ret, nb_sectors, sector_num = 0;
    int n;

    for (;;) {
        nb_sectors = target_size - sector_num;
        if (nb_sectors <= 0) {
            return 0;
        }
        if (nb_sectors > INT_MAX) {
            nb_sectors = INT_MAX;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
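/* Usage sketch (illustration only): wiping an image while letting the driver
 * unmap rather than allocate zeroed clusters.  "bs" is a hypothetical opened
 * BlockDriverState:
 *
 *     int ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */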
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
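/* Usage sketch (illustration only): persisting a small on-disk header with
 * barrier semantics via bdrv_pwrite_sync().  The 512-byte header buffer here
 * is hypothetical; format drivers use this pattern for metadata updates that
 * must not be reordered against later writes:
 *
 *     uint8_t header[512] = { 0 };
 *
 *     if (bdrv_pwrite_sync(bs, 0, header, sizeof(header)) < 0) {
 *         // metadata update failed; image may need repair
 *     }
 */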
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = MAX(0, ROUND_UP(total_sectors - sector_num,
                                         align >> BDRV_SECTOR_BITS));
        if (max_nb_sectors > 0) {
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector.  */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
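/* Worked example (illustration only): with request_alignment = 4096, a write
 * of bytes [512, 4608) triggers both RMW branches above.  The aligned head
 * page [0, 4096) and tail page [4096, 8192) are read via
 * bdrv_aligned_preadv(); the first 512 bytes of the head buffer and the last
 * 3584 bytes of the tail buffer are spliced around the caller's qiov, and a
 * single aligned write of [0, 8192) is issued. */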
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}
/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}
/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read,
                                       int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}
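/* Example (illustration only): with the werror=enospc policy, an ENOSPC
 * write error pauses the VM so the admin can grow the underlying storage,
 * while any other error is reported to the guest:
 *
 *     bdrv_get_error_action(bs, false, ENOSPC);  // -> BDRV_ACTION_STOP
 *     bdrv_get_error_action(bs, false, EIO);     // -> BDRV_ACTION_REPORT
 */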
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}
int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;

    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}
const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}
/* This function is to find block backend bs */
BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

/* This function is to find a node in the bs graph */
BlockDriverState *bdrv_find_node(const char *node_name)
{
    BlockDriverState *bs;

    assert(node_name);

    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        if (!strcmp(node_name, bs->node_name)) {
            return bs;
        }
    }
    return NULL;
}
/* Put this QMP function here so it can access the static graph_bdrv_states. */
BlockDeviceInfoList *bdrv_named_nodes_list(void)
{
    BlockDeviceInfoList *list, *entry;
    BlockDriverState *bs;

    list = NULL;
    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = bdrv_block_device_info(bs);
        entry->next = list;
        list = entry;
    }

    return list;
}
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp)
{
    BlockDriverState *bs = NULL;

    if ((!device && !node_name) || (device && node_name)) {
        error_setg(errp, "Use either device or node-name but not both");
        return NULL;
    }

    if (device) {
        bs = bdrv_find(device);

        if (!bs) {
            error_set(errp, QERR_DEVICE_NOT_FOUND, device);
            return NULL;
        }

        return bs;
    }

    bs = bdrv_find_node(node_name);

    if (!bs) {
        error_set(errp, QERR_DEVICE_NOT_FOUND, node_name);
        return NULL;
    }

    return bs;
}
BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, device_list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}
int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        int ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
    }

    return result;
}
int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}

bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.unallocated_blocks_are_zero;
    }

    return false;
}

bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t length;
    int64_t n;
    int64_t ret, ret2;

    length = bdrv_getlength(bs);
    if (length < 0) {
        return length;
    }

    if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t length2 = bdrv_getlength(bs2);
            if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            ret |= (ret2 & BDRV_BLOCK_ZERO);
        }
    }

    return ret;
}
3792 static void coroutine_fn
bdrv_get_block_status_co_entry(void *opaque
)
3794 BdrvCoGetBlockStatusData
*data
= opaque
;
3795 BlockDriverState
*bs
= data
->bs
;
3797 data
->ret
= bdrv_co_get_block_status(bs
, data
->sector_num
, data
->nb_sectors
,
3803 * Synchronous wrapper around bdrv_co_get_block_status().
3805 * See bdrv_co_get_block_status() for details.
3807 int64_t bdrv_get_block_status(BlockDriverState
*bs
, int64_t sector_num
,
3808 int nb_sectors
, int *pnum
)
3811 BdrvCoGetBlockStatusData data
= {
3813 .sector_num
= sector_num
,
3814 .nb_sectors
= nb_sectors
,
3819 if (qemu_in_coroutine()) {
3820 /* Fast-path if already in coroutine context */
3821 bdrv_get_block_status_co_entry(&data
);
3823 co
= qemu_coroutine_create(bdrv_get_block_status_co_entry
);
3824 qemu_coroutine_enter(co
, &data
);
3825 while (!data
.done
) {
3832 int coroutine_fn
bdrv_is_allocated(BlockDriverState
*bs
, int64_t sector_num
,
3833 int nb_sectors
, int *pnum
)
3835 int64_t ret
= bdrv_get_block_status(bs
, sector_num
, nb_sectors
, pnum
);
3840 (ret
& BDRV_BLOCK_DATA
) ||
3841 ((ret
& BDRV_BLOCK_ZERO
) && !bdrv_has_zero_init(bs
));
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}

ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
        return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}
int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}
static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
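/* Usage sketch (illustration only): virtio-blk-style batch submission.
 * "reqs" is a hypothetical array whose sector/nb_sectors/qiov/cb/opaque
 * fields the caller filled in beforehand:
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, num_reqs) < 0) {
 *         // only requests with reqs[i].error == 0 will see their callback
 *     }
 */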
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)

{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard;

    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockDriverAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
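/* Usage sketch (illustration only): discarding the first MiB of an image.
 * The request is advisory; drivers without discard support, or images opened
 * without BDRV_O_UNMAP, simply return 0 without doing anything:
 *
 *     int ret = bdrv_discard(bs, 0, 2048);   // 2048 sectors = 1 MiB
 *     if (ret < 0) {
 *         error_report("discard failed: %s", strerror(-ret));
 *     }
 */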
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return 0;
    if (!drv->bdrv_is_inserted)
        return 1;
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}

void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

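/*
 * Editor's sketch (compiled out): allocate a bounce buffer that satisfies
 * bs's memory-alignment requirement (important for O_DIRECT backends) and
 * release it with qemu_vfree(), the matching deallocator for
 * qemu_memalign():
 */
#if 0
static void example_aligned_buffer(BlockDriverState *bs)
{
    void *bounce = qemu_blockalign(bs, 4096);

    /* ... read into or write from the aligned buffer here ... */

    qemu_vfree(bounce);
}
#endif
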
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

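/*
 * Editor's sketch (compiled out): a typical caller wraps a guest buffer in
 * a QEMUIOVector and uses bdrv_qiov_is_aligned() to decide whether it can
 * be handed to the driver directly or must first be copied into a
 * qemu_blockalign()ed bounce buffer:
 */
#if 0
static bool example_can_use_directly(BlockDriverState *bs,
                                     void *buf, size_t len)
{
    QEMUIOVector qiov;
    bool aligned;

    qemu_iovec_init(&qiov, 1);
    qemu_iovec_add(&qiov, buf, len);
    aligned = bdrv_qiov_is_aligned(bs, &qiov);
    qemu_iovec_destroy(&qiov);

    return aligned;
}
#endif
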
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
    bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}

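/*
 * Editor's sketch (compiled out): the granularity argument is in bytes and
 * must be a power of two of at least BDRV_SECTOR_SIZE; block jobs such as
 * drive-mirror typically pass 64 KiB.  Create a bitmap, let writes dirty
 * it, then drop it again:
 */
#if 0
static void example_dirty_tracking(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bitmap = bdrv_create_dirty_bitmap(bs, 65536);

    /* from now on, bdrv_set_dirty() marks written 64 KiB chunks dirty */

    bdrv_release_dirty_bitmap(bs, bitmap);
}
#endif
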
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;
    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}

BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
        BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}

int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
{
    if (bitmap) {
        return hbitmap_get(bitmap->bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}

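/*
 * Editor's sketch (compiled out): consuming a dirty bitmap the way
 * block/mirror.c does -- initialize an HBitmapIter and pull dirty sector
 * numbers until hbitmap_iter_next() returns -1:
 */
#if 0
static void example_walk_dirty_sectors(BlockDriverState *bs,
                                       BdrvDirtyBitmap *bitmap)
{
    HBitmapIter hbi;
    int64_t sector;

    bdrv_dirty_iter_init(bs, bitmap, &hbi);
    while ((sector = hbitmap_iter_next(&hbi)) >= 0) {
        /* copy out the dirty chunk that starts at 'sector' */
    }
}
#endif
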
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}

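/*
 * Editor's note (sketch, compiled out): references must be balanced; a
 * caller that hands bs to code which may drop the last other reference
 * takes its own reference around the critical region:
 */
#if 0
static void example_keep_alive(BlockDriverState *bs)
{
    bdrv_ref(bs);       /* pin bs across the operation */
    /* ... operate on bs ... */
    bdrv_unref(bs);     /* may delete bs if this was the last reference */
}
#endif
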
void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}

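/*
 * Editor's sketch (compiled out): device emulation brackets each request
 * with the accounting pair so "info blockstats" can report per-type bytes,
 * op counts and total latency; e.g. around a read:
 */
#if 0
static void example_accounted_read(BlockDriverState *bs, int64_t bytes)
{
    BlockAcctCookie cookie;

    bdrv_acct_start(bs, &cookie, bytes, BDRV_ACCT_READ);
    /* ... submit the read and wait for completion ... */
    bdrv_acct_done(bs, &cookie);
}
#endif
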
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            BlockDriverState *bs;
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                bdrv_unref(bs);
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
}

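/*
 * Editor's sketch (compiled out): this is roughly how qemu-img invokes
 * bdrv_img_create() to produce an empty 1 GiB qcow2 image; failure is
 * reported through the Error object rather than a return value:
 */
#if 0
static void example_create_qcow2(void)
{
    Error *err = NULL;

    bdrv_img_create("disk.qcow2", "qcow2",
                    NULL, NULL,             /* no backing file/format */
                    NULL,                   /* no extra -o options */
                    (uint64_t)1 << 30,      /* 1 GiB */
                    0, &err, false);
    if (err) {
        fprintf(stderr, "%s\n", error_get_pretty(err));
        error_free(err);
    }
}
#endif
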
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}

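/*
 * Editor's sketch (compiled out): code that must run work "in the context
 * servicing this BDS" creates a bottom half on the context returned above;
 * with the current implementation that is always the main loop.  "my_cb"
 * is a hypothetical QEMUBHFunc supplied by the caller, which should
 * qemu_bh_delete() the BH once it has run:
 */
#if 0
static void example_run_in_bds_context(BlockDriverState *bs,
                                       QEMUBHFunc *my_cb, void *opaque)
{
    QEMUBH *bh = aio_bh_new(bdrv_get_aio_context(bs), my_cb, opaque);

    qemu_bh_schedule(bh);
}
#endif
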
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}

/* Used to recurse on single child block filters.
 * Single child block filters store their child in bs->file.
 */
bool bdrv_generic_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    if (!bs->drv) {
        return false;
    }

    if (!bs->drv->authorizations[BS_IS_A_FILTER]) {
        if (bs == candidate) {
            return true;
        } else {
            return false;
        }
    }

    if (!bs->drv->authorizations[BS_FILTER_PASS_DOWN]) {
        return false;
    }

    if (!bs->file) {
        return false;
    }

    return bdrv_recurse_is_first_non_filter(bs->file, candidate);
}

bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    if (bs->drv && bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    return bdrv_generic_is_first_non_filter(bs, candidate);
}

/* This function checks if the candidate is the first non-filter bs down its
 * bs chain. Since we don't have pointers to parents it explores all bs chains
 * from the top. Some filters can choose not to pass down the recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        if (!bs->file) {
            continue;
        }

        perm = bdrv_recurse_is_first_non_filter(bs->file, candidate);

        /* candidate is the first non filter */
        if (perm) {
            return true;
        }
    }

    return false;
}