/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the currently throttled I/O requests */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
/* This function makes an I/O wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
*bdrv_find_protocol(const char *filename
,
590 bool allow_protocol_prefix
)
597 /* TODO Drivers without bdrv_file_open must be specified explicitly */
600 * XXX(hch): we really should not let host device detection
601 * override an explicit protocol specification, but moving this
602 * later breaks access to device names with colons in them.
603 * Thanks to the brain-dead persistent naming schemes on udev-
604 * based Linux systems those actually are quite common.
606 drv1
= find_hdev_driver(filename
);
611 if (!path_has_protocol(filename
) || !allow_protocol_prefix
) {
612 return bdrv_find_format("file");
615 p
= strchr(filename
, ':');
618 if (len
> sizeof(protocol
) - 1)
619 len
= sizeof(protocol
) - 1;
620 memcpy(protocol
, filename
, len
);
621 protocol
[len
] = '\0';
622 QLIST_FOREACH(drv1
, &bdrv_drivers
, list
) {
623 if (drv1
->protocol_name
&&
624 !strcmp(drv1
->protocol_name
, protocol
)) {
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "format found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
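/*
 * Example (illustrative sketch, not part of QEMU): mapping a -drive cache=
 * mode string onto open flags with the parser above. "unsafe" enables
 * writeback caching and additionally suppresses flushes.
 */
static G_GNUC_UNUSED int example_cache_mode_to_flags(void)
{
    int flags = 0;

    if (bdrv_parse_cache_flags("unsafe", &flags) != 0) {
        return -1;
    }
    /* Here flags == (BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH). */
    return flags;
}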
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
static int bdrv_assign_node_name(BlockDriverState *bs,
                                 const char *node_name,
                                 Error **errp)
{
    if (!node_name) {
        return 0;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return -EINVAL;
    }

    /* takes care of avoiding namespace collisions */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return -EINVAL;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return -EINVAL;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);

    return 0;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    ret = bdrv_assign_node_name(bs, node_name, errp);
    if (ret < 0) {
        return ret;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert(bs->request_alignment != 0);

#ifndef _WIN32
    if (bs->is_temporary) {
        assert(bs->filename[0] != '\0');
        unlink(bs->filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   const char *reference, QDict *options, int flags,
                   Error **errp)
{
    BlockDriverState *bs = NULL;
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    if (reference) {
        if (filename || qdict_size(options)) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }
        QDECREF(options);

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    bs = bdrv_new("");
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    }

    if (!drv->bdrv_file_open) {
        ret = bdrv_open(bs, filename, options, flags, drv, &local_err);
        options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    bdrv_unref(bs);
    return ret;
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    bs->backing_hd = bdrv_new("");

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT |
                                    BDRV_O_COPY_ON_READ);

    ret = bdrv_open(bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, options,
                    back_flags, back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return ret;
    }

    if (bs->backing_hd->file) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->backing_hd->file->filename);
    }

    /* Recalculate the BlockLimits with the backing file */
    bdrv_refresh_limits(bs);

    return 0;
}
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If force_raw is true, bdrv_file_open() will be used, thereby preventing any
 * image format auto-detection. If it is false and a filename is given,
 * bdrv_open() will be used for auto-detection.
 *
 * If allow_none is true, no image will be opened if filename is false and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdrev_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool force_raw, bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        goto done;
    }

    if (filename && !force_raw) {
        /* If a filename is given and the block driver should be detected
           automatically (instead of using none), use bdrv_open() in order to do
           that auto-detection. */
        BlockDriverState *bs;

        if (reference) {
            error_setg(errp, "Cannot reference an existing block device while "
                       "giving a filename");
            ret = -EINVAL;
            goto done;
        }

        bs = bdrv_new("");
        ret = bdrv_open(bs, filename, image_options, flags, NULL, errp);
        if (ret < 0) {
            bdrv_unref(bs);
        } else {
            *pbs = bs;
        }
    } else {
        ret = bdrv_file_open(pbs, filename, reference, image_options, flags,
                             errp);
    }

done:
    qdict_del(options, bdref_key);
    return ret;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    const char *drvname;
    Error *local_err = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        QDict *snapshot_options;

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* Get the required size from the image */
        bs1 = bdrv_new("");
        QINCREF(options);
        ret = bdrv_open(bs1, filename, options, BDRV_O_NO_BACKING,
                        drv, &local_err);
        if (ret < 0) {
            bdrv_unref(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        /* Create the temporary image */
        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        /* Prepare a new options QDict for the temporary file, where user
         * options refer to the backing file */
        if (filename) {
            qdict_put(options, "file.filename", qstring_from_str(filename));
        }
        if (drv) {
            qdict_put(options, "driver", qstring_from_str(drv->format_name));
        }

        snapshot_options = qdict_new();
        qdict_put(snapshot_options, "backing", options);
        qdict_flatten(snapshot_options);

        bs->options = snapshot_options;
        options = qdict_clone_shallow(bs->options);

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    ret = bdrv_open_image(&file, filename, options, "file",
                          bdrv_open_flags(bs, flags | BDRV_O_UNMAP), true, true,
                          &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        if (file) {
            ret = find_image_format(file, filename, &drv, &local_err);
        } else {
            error_setg(errp, "Must specify either driver or file");
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);

        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
 * already performed, or alternatively may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    ret = 0;

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_close(bs);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            bdrv_start_throttled_reqs(bs);
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;

    /* keep the same entry in graph_bdrv_states
     * We do want to swap name but don't want to swap linked list entries
     */
    bs_dest->node_list   = bs_src->node_list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->guest_block_size = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}
bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors, length, backing_length;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = bdrv_getlength(bs->backing_hd);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible.  If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = bdrv_truncate(bs->backing_hd, length);
        if (ret < 0) {
            goto ro_cleanup;
        }
    }

    total_sectors = length >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = bdrv_read(bs, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = bdrv_write(bs->backing_hd, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        if (ret < 0) {
            goto ro_cleanup;
        }
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd) {
        bdrv_flush(bs->backing_hd);
    }

    ret = 0;
ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
2213 * This function should be called when a tracked request is completing.
2215 static void tracked_request_end(BdrvTrackedRequest
*req
)
2217 if (req
->serialising
) {
2218 req
->bs
->serialising_in_flight
--;
2221 QLIST_REMOVE(req
, list
);
2222 qemu_co_queue_restart_all(&req
->wait_queue
);
2226 * Add an active request to the tracked requests list
2228 static void tracked_request_begin(BdrvTrackedRequest
*req
,
2229 BlockDriverState
*bs
,
2231 unsigned int bytes
, bool is_write
)
2233 *req
= (BdrvTrackedRequest
){
2237 .is_write
= is_write
,
2238 .co
= qemu_coroutine_self(),
2239 .serialising
= false,
2240 .overlap_offset
= offset
,
2241 .overlap_bytes
= bytes
,
2244 qemu_co_queue_init(&req
->wait_queue
);
2246 QLIST_INSERT_HEAD(&bs
->tracked_requests
, req
, list
);
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}

typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
2430 * Drops images above 'base' up to and including 'top', and sets the image
2431 * above 'top' to have base as its backing file.
2433 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2434 * information in 'bs' can be properly updated.
2436 * E.g., this will convert the following chain:
2437 * bottom <- base <- intermediate <- top <- active
2441 * bottom <- base <- active
2443 * It is allowed for bottom==base, in which case it converts:
2445 * base <- intermediate <- top <- active
2452 * if active == top, that is considered an error
2455 int bdrv_drop_intermediate(BlockDriverState
*active
, BlockDriverState
*top
,
2456 BlockDriverState
*base
)
2458 BlockDriverState
*intermediate
;
2459 BlockDriverState
*base_bs
= NULL
;
2460 BlockDriverState
*new_top_bs
= NULL
;
2461 BlkIntermediateStates
*intermediate_state
, *next
;
2464 QSIMPLEQ_HEAD(states_to_delete
, BlkIntermediateStates
) states_to_delete
;
2465 QSIMPLEQ_INIT(&states_to_delete
);
2467 if (!top
->drv
|| !base
->drv
) {
2471 new_top_bs
= bdrv_find_overlay(active
, top
);
2473 if (new_top_bs
== NULL
) {
2474 /* we could not find the image above 'top', this is an error */
2478 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2479 * to do, no intermediate images */
2480 if (new_top_bs
->backing_hd
== base
) {
2487 /* now we will go down through the list, and add each BDS we find
2488 * into our deletion queue, until we hit the 'base'
2490 while (intermediate
) {
2491 intermediate_state
= g_malloc0(sizeof(BlkIntermediateStates
));
2492 intermediate_state
->bs
= intermediate
;
2493 QSIMPLEQ_INSERT_TAIL(&states_to_delete
, intermediate_state
, entry
);
2495 if (intermediate
->backing_hd
== base
) {
2496 base_bs
= intermediate
->backing_hd
;
2499 intermediate
= intermediate
->backing_hd
;
2501 if (base_bs
== NULL
) {
2502 /* something went wrong, we did not end at the base. safely
2503 * unravel everything, and exit with error */
2507 /* success - we can delete the intermediate states, and link top->base */
2508 ret
= bdrv_change_backing_file(new_top_bs
, base_bs
->filename
,
2509 base_bs
->drv
? base_bs
->drv
->format_name
: "");
2513 new_top_bs
->backing_hd
= base_bs
;
2515 bdrv_refresh_limits(new_top_bs
);
2517 QSIMPLEQ_FOREACH_SAFE(intermediate_state
, &states_to_delete
, entry
, next
) {
2518 /* so that bdrv_close() does not recursively close the chain */
2519 intermediate_state
->bs
->backing_hd
= NULL
;
2520 bdrv_unref(intermediate_state
->bs
);
2525 QSIMPLEQ_FOREACH_SAFE(intermediate_state
, &states_to_delete
, entry
, next
) {
2526 g_free(intermediate_state
);
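
/* Usage sketch (illustrative, not part of the original file): collapse the
 * chain base <- sn1 <- sn2 <- active into base <- active by dropping both
 * snapshots; 'sn2' plays the role of 'top'. */
static int example_collapse_chain(BlockDriverState *active,
                                  BlockDriverState *sn2,
                                  BlockDriverState *base)
{
    return bdrv_drop_intermediate(active, sn2, base);
}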
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   int size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
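
/* Illustrative sketch (not part of the original file): a synchronous,
 * vectored read built on bdrv_prwv_co(), mirroring how the helpers below
 * wrap it.  'buf' and 'bytes' are caller-provided. */
static int example_sync_readv(BlockDriverState *bs, int64_t offset,
                              void *buf, size_t bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = bytes,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    /* is_write = false, no request flags */
    return bdrv_prwv_co(bs, offset, &qiov, false, 0);
}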
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
    int64_t ret, nb_sectors, sector_num = 0;
    int n;

    for (;;) {
        nb_sectors = target_size - sector_num;
        if (nb_sectors <= 0) {
            return 0;
        }
        if (nb_sectors > INT_MAX) {
            nb_sectors = INT_MAX;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
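
/* Usage sketch (illustrative, not part of the original file): fully zero an
 * opened image, letting drivers unmap clusters instead of writing zeroes. */
static int example_zero_whole_device(BlockDriverState *bs)
{
    return bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
}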
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
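
/* Sketch of a typical caller (illustrative; the on-disk header layout is
 * hypothetical): a format driver updating its metadata header uses
 * bdrv_pwrite_sync() so no later write can be reordered ahead of it. */
static int example_update_header(BlockDriverState *bs,
                                 const void *header, int header_size)
{
    return bdrv_pwrite_sync(bs, 0, header, header_size);
}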
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (max_nb_sectors > 0) {
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector.  */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

    qemu_vfree(iov.iov_base);
    return ret;
}
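
/* Worked example of the alignment logic above (illustrative, not in the
 * original file): with write_zeroes_alignment = 8, a request for sectors
 * [5, 27) is issued in three pieces: a 3-sector head [5, 8), an aligned
 * bulk [8, 24), and a 3-sector tail [24, 27), so the driver sees the bulk
 * of the request fully aligned. */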
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);

    return ret;
}
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
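
/* Usage sketch (illustrative, not part of the original file): zero a sector
 * range from coroutine context, allowing unmap where the image permits it. */
static int coroutine_fn example_punch_hole(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors)
{
    return bdrv_co_write_zeroes(bs, sector_num, nb_sectors,
                                BDRV_REQ_MAY_UNMAP);
}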
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}

/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}

/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}

int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}
const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}

/* This function is to find block backend bs */
BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

/* This function is to find a node in the bs graph */
BlockDriverState *bdrv_find_node(const char *node_name)
{
    BlockDriverState *bs;

    assert(node_name);

    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        if (!strcmp(node_name, bs->node_name)) {
            return bs;
        }
    }
    return NULL;
}

/* Put this QMP function here so it can access the static graph_bdrv_states. */
BlockDeviceInfoList *bdrv_named_nodes_list(void)
{
    BlockDeviceInfoList *list, *entry;
    BlockDriverState *bs;

    list = NULL;
    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = bdrv_block_device_info(bs);
        entry->next = list;
        list = entry;
    }

    return list;
}
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp)
{
    BlockDriverState *bs = NULL;

    if (device) {
        bs = bdrv_find(device);
        if (bs) {
            return bs;
        }
    }

    if (node_name) {
        bs = bdrv_find_node(node_name);
        if (bs) {
            return bs;
        }
    }

    error_setg(errp, "Cannot find device=%s nor node_name=%s",
                     device ? device : "",
                     node_name ? node_name : "");
    return NULL;
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, device_list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}
int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        int ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
    }

    return result;
}

int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}

bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.unallocated_blocks_are_zero;
    }

    return false;
}

bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t length;
    int64_t n;
    int64_t ret, ret2;

    length = bdrv_getlength(bs);
    if (length < 0) {
        return length;
    }

    if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t length2 = bdrv_getlength(bs2);
            if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            ret |= (ret2 & BDRV_BLOCK_ZERO);
        }
    }

    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
                                         data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            qemu_aio_wait();
        }
    }
    return data.ret;
}
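
/* Illustrative sketch (not part of the original file): walk a whole image
 * and report each extent's allocation status via the synchronous wrapper. */
static void example_dump_block_status(BlockDriverState *bs)
{
    int64_t total = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
    int64_t sector_num = 0;

    while (sector_num < total) {
        int pnum;
        int64_t ret = bdrv_get_block_status(bs, sector_num,
                                            MIN(total - sector_num, INT_MAX),
                                            &pnum);
        if (ret < 0 || pnum == 0) {
            break;
        }
        printf("%" PRId64 "+%d: %s%s\n", sector_num, pnum,
               (ret & BDRV_BLOCK_DATA) ? "data " : "",
               (ret & BDRV_BLOCK_ZERO) ? "zero" : "");
        sector_num += pnum;
    }
}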
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return
        (ret & BDRV_BLOCK_DATA) ||
        ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
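
/* Sketch (illustrative, not part of the original file): check whether any
 * image in the chain above 'base' holds data for a sector range, e.g.
 * before deciding to copy it during streaming. */
static int example_needs_copy(BlockDriverState *top, BlockDriverState *base,
                              int64_t sector_num, int nb_sectors)
{
    int pnum;
    return bdrv_is_allocated_above(top, base, sector_num, nb_sectors, &pnum);
}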
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
        return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}

int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
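
/* Illustrative sketch (not part of the original file): batch two writes in
 * one bdrv_aio_multiwrite() call.  The BlockRequest field names follow the
 * uses above; the sector numbers here are arbitrary, and 'reqs' plus both
 * qiovs must stay valid until every callback has run. */
static int example_submit_pair(BlockDriverState *bs,
                               QEMUIOVector *qiov0, QEMUIOVector *qiov1,
                               BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockRequest reqs[2] = {
        { .sector = 0,  .nb_sectors = qiov0->size >> 9, .qiov = qiov0,
          .cb = cb, .opaque = opaque },
        { .sector = 64, .nb_sectors = qiov1->size >> 9, .qiov = qiov1,
          .cb = cb, .opaque = opaque },
    };

    return bdrv_aio_multiwrite(bs, reqs, 2);
}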
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)
{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard;

    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockDriverAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
4910 /**************************************************************/
4911 /* removable device support */
4914 * Return TRUE if the media is present
4916 int bdrv_is_inserted(BlockDriverState
*bs
)
4918 BlockDriver
*drv
= bs
->drv
;
4922 if (!drv
->bdrv_is_inserted
)
4924 return drv
->bdrv_is_inserted(bs
);
4928 * Return whether the media changed since the last call to this
4929 * function, or -ENOTSUP if we don't know. Most drivers don't know.
4931 int bdrv_media_changed(BlockDriverState
*bs
)
4933 BlockDriver
*drv
= bs
->drv
;
4935 if (drv
&& drv
->bdrv_media_changed
) {
4936 return drv
->bdrv_media_changed(bs
);
4942 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
4944 void bdrv_eject(BlockDriverState
*bs
, bool eject_flag
)
4946 BlockDriver
*drv
= bs
->drv
;
4948 if (drv
&& drv
->bdrv_eject
) {
4949 drv
->bdrv_eject(bs
, eject_flag
);
4952 if (bs
->device_name
[0] != '\0') {
4953 bdrv_emit_qmp_eject_event(bs
, eject_flag
);
4958 * Lock or unlock the media (if it is locked, the user won't be able
4959 * to eject it manually).
4961 void bdrv_lock_medium(BlockDriverState
*bs
, bool locked
)
4963 BlockDriver
*drv
= bs
->drv
;
4965 trace_bdrv_lock_medium(bs
, locked
);
4967 if (drv
&& drv
->bdrv_lock_medium
) {
4968 drv
->bdrv_lock_medium(bs
, locked
);
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}
BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}
void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
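
/* Illustrative sketch (not part of the driver): buffers handed to raw or
 * O_DIRECT backends must respect the memory alignment reported by
 * bdrv_opt_mem_align(), which is exactly what qemu_blockalign() guarantees.
 * A hypothetical caller building an aligned vector:
 *
 *     void *buf = qemu_blockalign(bs, 4096);       // aligned allocation
 *     struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *     QEMUIOVector qiov;
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     assert(bdrv_qiov_is_aligned(bs, &qiov));     // holds by construction
 *     ...
 *     qemu_vfree(buf);
 */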
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
    bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;
    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
        BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}
int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
{
    return hbitmap_get(bitmap->bitmap, sector);
}
void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
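
/* Illustrative sketch of the dirty bitmap lifecycle (hypothetical caller
 * such as a backup job, not part of the driver): the granularity is given
 * in bytes, must be a power of two, and must cover at least one sector, as
 * the assertions in bdrv_create_dirty_bitmap() enforce.
 *
 *     BdrvDirtyBitmap *bitmap = bdrv_create_dirty_bitmap(bs, 65536);
 *     ...                // guest writes mark sectors via bdrv_set_dirty()
 *     if (bdrv_get_dirty_count(bs, bitmap) > 0) {
 *         HBitmapIter hbi;
 *         bdrv_dirty_iter_init(bs, bitmap, &hbi);
 *         ...            // walk dirty sectors with hbitmap_iter_next(&hbi)
 *     }
 *     bdrv_release_dirty_bitmap(bs, bitmap);
 */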
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}
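
/* Illustrative sketch (not part of the driver): any code that stashes a
 * BlockDriverState pointer across a callback boundary must pin it with the
 * ref/unref pair above:
 *
 *     bdrv_ref(bs);      // keep bs alive while the deferred work runs
 *     ...
 *     bdrv_unref(bs);    // may delete bs if this was the last reference
 */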
void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}
void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}
void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
                enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}
void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
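
/* Illustrative sketch (not part of the driver): device models bracket each
 * request with a cookie so the byte/op/latency totals above can feed
 * "query-blockstats".  A hypothetical synchronous read path:
 *
 *     BlockAcctCookie cookie;
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     ret = bdrv_read(bs, sector_num, buf, nb_sectors);
 *     bdrv_acct_done(bs, &cookie);
 */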
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            BlockDriverState *bs;
            uint64_t size;      /* shadows the option parameter above */
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                bdrv_unref(bs);
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;        /* sectors to bytes */

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
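
/* Illustrative sketch (hypothetical caller, not part of the driver):
 * qemu-img create funnels into bdrv_img_create().  Creating a 1 GiB qcow2
 * image with default options and default flags might look like this:
 *
 *     Error *err = NULL;
 *     bdrv_img_create("test.qcow2", "qcow2", NULL, NULL, NULL,
 *                     (uint64_t)1 << 30, 0, &err, false);
 *     if (err) {
 *         fprintf(stderr, "%s\n", error_get_pretty(err));
 *         error_free(err);
 *     }
 */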
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}
/* Used to recurse on single child block filters.
 * Single child block filters store their child in bs->file.
 */
bool bdrv_generic_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    /* return false if basic checks fail */
    if (!bs || !bs->drv) {
        return false;
    }

    /* the code reached a non block filter driver -> check if the bs is
     * the same as the candidate.  It's the recursion termination condition.
     */
    if (!bs->drv->authorizations[BS_IS_A_FILTER]) {
        if (bs == candidate) {
            return true;
        }
        return false;
    }

    /* the driver is a block filter but does not allow the recursion to pass
     * down to its child -> the candidate cannot be below it
     */
    if (!bs->drv->authorizations[BS_FILTER_PASS_DOWN]) {
        return false;
    }

    /* otherwise recurse down the single child stored in bs->file */
    return bdrv_recurse_is_first_non_filter(bs->file, candidate);
}
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    if (bs->drv && bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    return bdrv_generic_is_first_non_filter(bs, candidate);
}
/* This function checks if the candidate is the first non-filter bs down its
 * bs chain.  Since we don't have pointers to parents it explores all bs
 * chains from the top.  Some filters can choose not to pass down the
 * recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        /* try to recurse in this top level bs */
        perm = bdrv_recurse_is_first_non_filter(bs->file, candidate);

        /* candidate is the first non filter */
        if (perm) {
            return true;
        }
    }

    return false;
}
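
/* Illustrative sketch (hypothetical caller, not part of the driver): an
 * operation that must target the visible top of a device chain, rather
 * than a node hidden below a block filter, can guard itself with the
 * helper above; "candidate" is whatever node the caller was handed:
 *
 *     if (!bdrv_is_first_non_filter(candidate)) {
 *         error_setg(errp, "Node is in use as a filter child");
 *         return;
 *     }
 */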