/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#endif
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

/* This function makes an I/O wait if needed
 *
 * @nb_sectors: the number of sectors of the I/O
 * @is_write:   is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     int nb_sectors,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait or any request of this type is throttled, queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state,
                     is_write,
                     nb_sectors * BDRV_SECTOR_SIZE);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
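/*
 * Illustrative sketch, not part of the original file: how a caller might arm
 * throttling for a device.  The 1 MB/s figure is made up, and the
 * ThrottleConfig field layout (buckets[THROTTLE_BPS_TOTAL].avg) is an
 * assumption based on util/throttle.h of the same era.  As noted above,
 * bdrv_io_limits_enable() must run before bdrv_set_io_limits().
 */
static void __attribute__((unused)) example_enable_io_limits(BlockDriverState *bs)
{
    ThrottleConfig cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1 * 1024 * 1024; /* ~1 MB/s total */

    bdrv_io_limits_enable(bs);    /* initialises bs->throttle_state and timers */
    bdrv_set_io_limits(bs, &cfg); /* applies the limits and wakes queued I/O */
}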
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
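/*
 * Illustrative sketch, not part of the original file: what path_combine()
 * produces for a relative and for an absolute backing file name.  The paths
 * are made up for the example.
 */
static void __attribute__((unused)) example_path_combine_usage(void)
{
    char dest[PATH_MAX];

    /* relative name: resolved next to the image that references it */
    path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "base.img");
    /* dest now holds "/images/base.img" */

    /* absolute name: copied through unchanged */
    path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "/iso/cd.iso");
    /* dest now holds "/iso/cd.iso" */
}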
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}
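/*
 * Illustrative sketch, not part of the original file: creating an anonymous
 * BlockDriverState and dropping the reference again.  Passing a non-empty
 * device name instead would also link the new state into bdrv_states.
 */
static void __attribute__((unused)) example_bdrv_new_usage(void)
{
    BlockDriverState *bs = bdrv_new("");  /* anonymous, not in bdrv_states */

    /* ... use bs, e.g. hand it to bdrv_open() ... */

    bdrv_unref(bs);                       /* drops the reference from bdrv_new() */
}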
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (error_is_set(&cco.err)) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
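/*
 * Illustrative sketch, not part of the original file: creating an empty image
 * file through the protocol ("file") driver, driven by the coroutine
 * machinery of bdrv_create() above.  The file name and 64 MB size are made
 * up for the example.
 */
static int __attribute__((unused)) example_create_raw_file(Error **errp)
{
    const char *filename = "/tmp/example.img";
    BlockDriver *drv = bdrv_find_protocol(filename, true);
    QEMUOptionParameter *options;
    int ret;

    if (!drv) {
        return -ENOENT;
    }

    /* take the driver's option template and only set the size */
    options = parse_option_parameters("", drv->create_options, NULL);
    set_option_parameter_int(options, BLOCK_OPT_SIZE, 64 * 1024 * 1024);

    ret = bdrv_create(drv, filename, options, errp);
    free_option_parameters(options);
    return ret;
}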
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
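/*
 * Illustrative sketch, not part of the original file: obtaining a temporary
 * file name, much like the snapshot=on path in bdrv_open() does for its
 * qcow2 overlay.  The buffer size just has to satisfy the platform limits
 * (MAX_PATH on Windows).
 */
static int __attribute__((unused)) example_tmp_name(void)
{
    char tmp[PATH_MAX + 1];
    int ret = get_tmp_filename(tmp, sizeof(tmp));

    if (ret < 0) {
        return ret;  /* negative errno, e.g. -EOVERFLOW or -errno from mkstemp */
    }
    /* tmp now names an empty, uniquely named temporary file */
    return 0;
}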
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
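/*
 * Illustrative sketch, not part of the original file: how -drive cache=...
 * strings map onto open flags via bdrv_parse_cache_flags().  The initial
 * flag value is arbitrary.
 */
static int __attribute__((unused)) example_parse_cache_mode(void)
{
    int flags = 0;

    if (bdrv_parse_cache_flags("none", &flags) < 0) {
        return -1;
    }
    /* "none" selects O_DIRECT-style access with a write cache:
     * flags now contain BDRV_O_NOCACHE | BDRV_O_CACHE_WB */

    if (bdrv_parse_cache_flags("writethrough", &flags) < 0) {
        return -1;
    }
    /* "writethrough" is the default: the cache bits are cleared again */
    return 0;
}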
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->buffer_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp, "Driver '%s' is not whitelisted", drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (!bs->read_only && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
        } else if (filename) {
            error_setg_errno(errp, -ret, "Could not open '%s'", filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        assert(filename != NULL);
        unlink(filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   QDict *options, int flags, Error **errp)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs = bdrv_new("");
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_whitelisted_format(drvname, !(flags & BDRV_O_RDWR));
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    } else if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        ret = -EINVAL;
        goto fail;
    }

    ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    bdrv_unref(bs);
    return ret;
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    bs->backing_hd = bdrv_new("");

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT);

    ret = bdrv_open(bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, options,
                    back_flags, back_drv, &local_err);
    pstrcpy(bs->backing_file, sizeof(bs->backing_file),
            bs->backing_hd->file->filename);
    if (ret < 0) {
        bdrv_unref(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_propagate(errp, local_err);
        return ret;
    }
    return 0;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    QDict *file_options = NULL;
    const char *drvname;
    Error *local_err = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        char backing_filename[PATH_MAX];

        if (qdict_size(options) != 0) {
            error_setg(errp, "Can't use snapshot=on with driver-specific options");
            ret = -EINVAL;
            goto fail;
        }
        assert(filename != NULL);

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, NULL, 0, drv, &local_err);
        if (ret < 0) {
            bdrv_unref(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        /* Real path is meaningless for protocols */
        if (path_has_protocol(filename)) {
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        } else if (!realpath(filename, backing_filename)) {
            error_setg_errno(errp, errno, "Could not resolve path '%s'", filename);
            ret = -errno;
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(create_options, BLOCK_OPT_BACKING_FILE,
                             backing_filename);
        if (drv) {
            set_option_parameter(create_options, BLOCK_OPT_BACKING_FMT,
                                 drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        filename = tmp_filename;

        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    qdict_extract_subqdict(options, &file_options, "file.");

    ret = bdrv_file_open(&file, filename, file_options,
                         bdrv_open_flags(bs, flags | BDRV_O_UNMAP), &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_whitelisted_format(drvname, !(flags & BDRV_O_RDWR));
        qdict_del(options, "driver");
    }

    if (!drv) {
        ret = find_image_format(file, filename, &drv, &local_err);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (bs->file != file) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);

        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
 * already performed, or alternatively may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic set.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}

/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}


/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
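/*
 * Illustrative sketch, not part of the original file: switching a device to
 * read-only and back with bdrv_reopen().  Whether the second call is allowed
 * is decided by the BDRV_O_ALLOW_RDWR check in bdrv_reopen_prepare() below.
 */
static int __attribute__((unused)) example_toggle_read_only(BlockDriverState *bs,
                                                            Error **errp)
{
    int ret;

    /* drop BDRV_O_RDWR: reopen the whole chain read-only */
    ret = bdrv_reopen(bs, bs->open_flags & ~BDRV_O_RDWR, errp);
    if (ret < 0) {
        return ret;
    }

    /* and reopen it writable again */
    return bdrv_reopen(bs, bs->open_flags | BDRV_O_RDWR, errp);
}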
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }


    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}

/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}

/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (bdrv_start_throttled_reqs(bs)) {
                busy = true;
            }
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
/* make a BlockDriverState anonymous by removing from bdrv_state list.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}

static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}

/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}
void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
1848 * Returns 0 if the check could be completed (it doesn't mean that the image is
1849 * free of errors) or -errno when an internal error occurred. The results of the
1850 * check are stored in res.
1852 int bdrv_check(BlockDriverState
*bs
, BdrvCheckResult
*res
, BdrvCheckMode fix
)
1854 if (bs
->drv
->bdrv_check
== NULL
) {
1858 memset(res
, 0, sizeof(*res
));
1859 return bs
->drv
->bdrv_check(bs
, res
, fix
);
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags =  bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}

int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
*req
,
2009 int64_t sector_num
, int nb_sectors
) {
2011 if (sector_num
>= req
->sector_num
+ req
->nb_sectors
) {
2015 if (req
->sector_num
>= sector_num
+ nb_sectors
) {
2021 static void coroutine_fn
wait_for_overlapping_requests(BlockDriverState
*bs
,
2022 int64_t sector_num
, int nb_sectors
)
2024 BdrvTrackedRequest
*req
;
2025 int64_t cluster_sector_num
;
2026 int cluster_nb_sectors
;
2029 /* If we touch the same cluster it counts as an overlap. This guarantees
2030 * that allocating writes will be serialized and not race with each other
2031 * for the same cluster. For example, in copy-on-read it ensures that the
2032 * CoR read and write operations are atomic and guest writes cannot
2033 * interleave between them.
2035 bdrv_round_to_clusters(bs
, sector_num
, nb_sectors
,
2036 &cluster_sector_num
, &cluster_nb_sectors
);
2040 QLIST_FOREACH(req
, &bs
->tracked_requests
, list
) {
2041 if (tracked_request_overlaps(req
, cluster_sector_num
,
2042 cluster_nb_sectors
)) {
2043 /* Hitting this means there was a reentrant request, for
2044 * example, a block driver issuing nested requests. This must
2045 * never happen since it means deadlock.
2047 assert(qemu_coroutine_self() != req
->co
);
2049 qemu_co_queue_wait(&req
->wait_queue
);
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}

typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov,
                                     rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov,
                                      rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_rwv_co(BlockDriverState *bs, int64_t sector_num,
                       QEMUIOVector *qiov, bool is_write,
                       BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = qiov->size >> BDRV_SECTOR_BITS,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };
    assert((qiov->size & (BDRV_SECTOR_SIZE - 1)) == 0);

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_rwv_co(bs, sector_num, &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov)
{
    return bdrv_rwv_co(bs, sector_num, qiov, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE);
}
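/*
 * Illustrative sketch, not part of the original file: synchronous sector
 * access with bdrv_read()/bdrv_write().  The sector number is arbitrary and
 * the buffer covers exactly one sector.
 */
static int __attribute__((unused)) example_rw_one_sector(BlockDriverState *bs)
{
    uint8_t buf[BDRV_SECTOR_SIZE];
    int ret;

    ret = bdrv_read(bs, 0, buf, 1);       /* read sector 0 */
    if (ret < 0) {
        return ret;                        /* e.g. -EIO, -ENOMEDIUM */
    }

    buf[0] ^= 0xff;                        /* modify it */

    return bdrv_write(bs, 0, buf, 1);      /* write it back */
}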
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = qiov->size;

    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, 0, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)),
                          len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return qiov->size;
        sector_num++;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        QEMUIOVector qiov_inplace;

        qemu_iovec_init(&qiov_inplace, qiov->niov);
        qemu_iovec_concat(&qiov_inplace, qiov, len,
                          nb_sectors << BDRV_SECTOR_BITS);
        ret = bdrv_writev(bs, sector_num, &qiov_inplace);
        qemu_iovec_destroy(&qiov_inplace);
        if (ret < 0) {
            return ret;
        }

        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, qiov->size - count, tmp_buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }
    return qiov->size;
}
*bs
, int64_t offset
,
2492 const void *buf
, int count1
)
2495 struct iovec iov
= {
2496 .iov_base
= (void *) buf
,
2500 qemu_iovec_init_external(&qiov
, &iov
, 1);
2501 return bdrv_pwritev(bs
, offset
, &qiov
);
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
    const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
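/*
 * Editor's example (illustrative only): bdrv_pwrite_sync() is what a caller
 * uses when a small metadata update must reach stable storage before it may
 * proceed, e.g. updating an image header before reusing the space that the
 * old header referenced.  "header_offset" and "header" are hypothetical names
 * used only for this sketch:
 *
 *     int ret = bdrv_pwrite_sync(bs, header_offset, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;   // the update may or may not have reached the disk
 *     }
 *     // safe: no later write can be reordered before the header update
 */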
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
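/*
 * Editor's note (illustration, not in the original): bdrv_round_to_clusters()
 * widens the guest request so the copied range covers whole clusters.  For a
 * 64k cluster (128 sectors), a read of sectors [130, 140) is widened to
 * [128, 256): the whole cluster is read into the bounce buffer and written
 * back, and only the 10 requested sectors are copied into the guest's qiov
 * (skip_bytes = (130 - 128) * BDRV_SECTOR_SIZE).
 */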
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, false);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = MAX(0, total_sectors - sector_num);
        if (max_nb_sectors > 0) {
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov;
    int ret;

    /* TODO Emulate only part of misaligned requests instead of letting block
     * drivers return -ENOTSUP and emulate everything */

    /* First try the efficient write zeroes operation */
    if (drv->bdrv_co_write_zeroes) {
        ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
        if (ret != -ENOTSUP) {
            return ret;
        }
    }

    /* Fall back to bounce buffer if write zeroes is unsupported */
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);
    memset(iov.iov_base, 0, iov.iov_len);
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);

    qemu_vfree(iov.iov_base);
    return ret;
}
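/*
 * Editor's note (illustration, not in the original): when a driver has no
 * .bdrv_co_write_zeroes callback, the fallback above is equivalent to the
 * caller doing an ordinary write of an explicitly zeroed, alignment-safe
 * buffer:
 *
 *     void *buf = qemu_blockalign(bs, nb_sectors * BDRV_SECTOR_SIZE);
 *     memset(buf, 0, nb_sectors * BDRV_SECTOR_SIZE);
 *     // ...wrap buf in a one-element QEMUIOVector and call bdrv_co_writev()
 *     qemu_vfree(buf);
 *
 * Correct, but it cannot take advantage of driver-level unmap/zero support.
 */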
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, true);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    if (bs->dirty_bitmap) {
        bdrv_set_dirty(bs, sector_num, nb_sectors);
    }

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    tracked_request_end(&req);

    return ret;
}
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}
/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}

/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (bdrv_dev_has_removable_media(bs)) {
        if (drv->bdrv_getlength) {
            return drv->bdrv_getlength(bs);
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}
/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read,
                                       int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}
int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}
const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}

BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}
int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
    }

    return result;
}
int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t length;
    int64_t n;
    int64_t ret, ret2;

    length = bdrv_getlength(bs);
    if (length < 0) {
        return length;
    }

    if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }

    if (!(ret & BDRV_BLOCK_DATA)) {
        if (bdrv_has_zero_init(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t length2 = bdrv_getlength(bs2);
            if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            ret |= (ret2 & BDRV_BLOCK_ZERO);
        }
    }

    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num,
                                         data->nb_sectors, data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            qemu_aio_wait();
        }
    }
    return data.ret;
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return
        (ret & BDRV_BLOCK_DATA) ||
        ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
}
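/*
 * Editor's example (illustrative, not in the original): a typical caller loop
 * that classifies an image in chunks.  "bs" is any open BlockDriverState.
 *
 *     int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
 *     int64_t sector = 0;
 *     while (sector < total) {
 *         int num;
 *         int64_t ret = bdrv_get_block_status(bs, sector, 1 << 16, &num);
 *         if (ret < 0) {
 *             break;
 *         }
 *         // ret & BDRV_BLOCK_DATA / BDRV_BLOCK_ZERO describe the next 'num'
 *         // sectors; with BDRV_BLOCK_OFFSET_VALID the sector-aligned host
 *         // offset is encoded in the upper bits of ret.
 *         sector += num;
 *     }
 */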
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
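/*
 * Editor's example (illustrative): during a commit- or stream-style operation
 * a caller can ask whether anything between the active layer and its base
 * contributes data for a range, walking it in 'pnum'-sized steps:
 *
 *     int num;
 *     if (bdrv_is_allocated_above(top, base, sector, nb_sectors, &num)) {
 *         // at least the first 'num' sectors must be copied from the chain
 *     } else {
 *         // the first 'num' sectors fall through to 'base' (or are unallocated)
 *     }
 *
 * 'top', 'base', 'sector' and 'nb_sectors' are hypothetical variables.
 */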
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(!bs->dirty_bitmap);

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the
             * current image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing
             * against is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}

int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
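/*
 * Editor's example (illustrative): two sorted requests
 *     reqs[0]: sector 0, nb_sectors 8
 *     reqs[1]: sector 8, nb_sectors 8
 * are exactly sequential (reqs[1].sector == oldreq_last == 8), so they merge:
 * a new qiov concatenates 8 sectors from reqs[0] and 8 from reqs[1], and
 * reqs[0] becomes a single 16-sector write.  A third request starting at
 * sector 32 would leave a gap and therefore stay unmerged (outidx++).
 */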
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
                        reqs[i].nb_sectors, multiwrite_cb, mcb);
    }

    return 0;
}
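/*
 * Editor's example (illustrative): virtio-blk is the main consumer of the
 * multiwrite interface; a device model batches the writes of one guest
 * notification and submits them in one call.  Names below are hypothetical:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &q0, .cb = done, .opaque = r0 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &q1, .cb = done, .opaque = r1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // only requests with reqs[i].error == 0 will still get a callback
 *     }
 */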
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)

{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
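/*
 * Editor's note (illustration, not in the original): bdrv_flush() shows the
 * pattern used by the synchronous wrappers in this file.  A caller that is
 * already a coroutine runs the entry function directly; everything else spawns
 * a coroutine and pumps AIO completions until the shared RwCo leaves NOT_DONE:
 *
 *     RwCo rwco = { .bs = bs, .ret = NOT_DONE };
 *     Coroutine *co = qemu_coroutine_create(bdrv_flush_co_entry);
 *     qemu_coroutine_enter(co, &rwco);
 *     while (rwco.ret == NOT_DONE) {
 *         qemu_aio_wait();          // let outstanding AIO complete
 *     }
 */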
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    if (bs->dirty_bitmap) {
        bdrv_reset_dirty(bs, sector_num, nb_sectors);
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (bs->drv->bdrv_co_discard) {
        return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
    } else if (bs->drv->bdrv_aio_discard) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                        bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    } else {
        return 0;
    }
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return 0;
    if (!drv->bdrv_is_inserted)
        return 1;
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}

/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}
void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
{
    bs->buffer_alignment = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign((bs && bs->buffer_alignment) ?
                         bs->buffer_alignment : 512, size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
            return false;
        }
    }

    return true;
}
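/*
 * Editor's example (illustrative): request paths that may hit a file opened
 * with strict alignment requirements (e.g. cache=none/O_DIRECT, an assumption
 * by the editor, not stated here) check the vector first and bounce if needed:
 *
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         void *bounce = qemu_blockalign(bs, qiov->size);
 *         // copy through 'bounce', which honours bs->buffer_alignment
 *         qemu_vfree(bounce);
 *     }
 *
 * The helper itself only checks each iov_base against bs->buffer_alignment.
 */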
void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;

    assert((granularity & (granularity - 1)) == 0);

    if (granularity) {
        granularity >>= BDRV_SECTOR_BITS;
        assert(!bs->dirty_bitmap);
        bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
        bs->dirty_bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    } else {
        if (bs->dirty_bitmap) {
            hbitmap_free(bs->dirty_bitmap);
            bs->dirty_bitmap = NULL;
        }
    }
}

int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
    if (bs->dirty_bitmap) {
        return hbitmap_get(bs->dirty_bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bs->dirty_bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    hbitmap_set(bs->dirty_bitmap, cur_sector, nr_sectors);
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
{
    hbitmap_reset(bs->dirty_bitmap, cur_sector, nr_sectors);
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs)
{
    if (bs->dirty_bitmap) {
        return hbitmap_count(bs->dirty_bitmap);
    } else {
        return 0;
    }
}
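/*
 * Editor's example (illustrative): consumers such as block migration or
 * drive-mirror walk the dirty bitmap and re-read the dirty ranges:
 *
 *     HBitmapIter hbi;
 *     int64_t sector;
 *
 *     bdrv_dirty_iter_init(bs, &hbi);
 *     while ((sector = hbitmap_iter_next(&hbi)) >= 0) {
 *         // copy out this chunk, then clear it so it is not copied twice
 *         bdrv_reset_dirty(bs, sector, chunk_sectors);
 *     }
 *
 * 'chunk_sectors' is a hypothetical per-caller chunk size.
 */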
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}

void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
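/*
 * Editor's example (illustrative): device models bracket each guest request
 * with an accounting cookie so query-blockstats can report bytes, ops and
 * total service time per direction:
 *
 *     BlockAcctCookie acct;
 *     bdrv_acct_start(bs, &acct, qiov->size, BDRV_ACCT_WRITE);
 *     // ...issue the write; then, in its completion callback...
 *     bdrv_acct_done(bs, &acct);
 */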
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_unref(bs);
    }
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
}
*bdrv_get_aio_context(BlockDriverState
*bs
)
4618 /* Currently BlockDriverState always uses the main loop AioContext */
4619 return qemu_get_aio_context();
4622 void bdrv_add_before_write_notifier(BlockDriverState
*bs
,
4623 NotifierWithReturn
*notifier
)
4625 notifier_with_return_list_add(&bs
->before_write_notifiers
, notifier
);
4628 int bdrv_amend_options(BlockDriverState
*bs
, QEMUOptionParameter
*options
)
4630 if (bs
->drv
->bdrv_amend_options
== NULL
) {
4633 return bs
->drv
->bdrv_amend_options(bs
, options
);