/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor.h"
#include "block_int.h"
#include "module.h"
#include "qjson.h"
#include "qemu-coroutine.h"
#include "qmp-commands.h"
#include "qemu-timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#endif

#ifdef _WIN32
#include <windows.h>
#endif
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;
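/* These flags are internal to the block layer: bdrv_co_do_readv() and
 * bdrv_co_do_writev() below interpret them to request copy-on-read handling
 * or an efficient zero-write instead of a plain guest data transfer. */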
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);
/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    bs->slice_start = 0;
    bs->slice_end   = 0;
    bs->slice_time  = 0;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
}
static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
    bs->slice_start = qemu_get_clock_ns(vm_clock);
    bs->slice_end   = bs->slice_start + bs->slice_time;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
    bs->io_limits_enabled = true;
}
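/* Example call sequence, drawn from this file: bdrv_set_io_limits() copies
 * the limits and marks io_limits_enabled when any limit is non-zero, and
 * bdrv_open() then calls bdrv_io_limits_enable() to arm the throttled_reqs
 * queue and the vm_clock-based block_timer. */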
bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
         || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
         || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
         || io_limits->iops[BLOCK_IO_LIMIT_READ]
         || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
         || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* We aim to preserve each request's ordering, in FIFO mode. The next
     * throttled request will not be dequeued until the current request is
     * allowed to be serviced. So if the current request still exceeds the
     * limits, it is re-inserted at the head and all requests following it
     * stay in the throttled_reqs queue.
     */
    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}
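/* bdrv_co_do_readv() and bdrv_co_do_writev() call this intercept from
 * coroutine context before issuing guest I/O, so a request that exceeds the
 * configured limits simply yields here until the block_timer wakes it up. */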
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
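/* Illustrative example (hypothetical paths): combining the base path
 * "/images/base.qcow2" with the relative filename "backing.qcow2" yields
 * "/images/backing.qcow2"; an absolute filename is copied to dest as-is. */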
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    return bs;
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}
BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;
static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;
    assert(cco->drv);

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}
int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        return -ENOTSUP;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    g_free(cco.filename);

    return ret;
}
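/* The same synchronous-over-coroutine pattern, namely running the entry
 * function directly when already in coroutine context and otherwise spawning
 * a coroutine and polling qemu_aio_wait() until the NOT_DONE sentinel is
 * overwritten, is used again by bdrv_rw_co() and bdrv_is_allocated() later
 * in this file. */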
int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0 || close(fd)) {
        return -errno;
    }
    return 0;
#endif
}
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
*bdrv_find_protocol(const char *filename
)
472 /* TODO Drivers without bdrv_file_open must be specified explicitly */
475 * XXX(hch): we really should not let host device detection
476 * override an explicit protocol specification, but moving this
477 * later breaks access to device names with colons in them.
478 * Thanks to the brain-dead persistent naming schemes on udev-
479 * based Linux systems those actually are quite common.
481 drv1
= find_hdev_driver(filename
);
486 if (!path_has_protocol(filename
)) {
487 return bdrv_find_format("file");
489 p
= strchr(filename
, ':');
492 if (len
> sizeof(protocol
) - 1)
493 len
= sizeof(protocol
) - 1;
494 memcpy(protocol
, filename
, len
);
495 protocol
[len
] = '\0';
496 QLIST_FOREACH(drv1
, &bdrv_drivers
, list
) {
497 if (drv1
->protocol_name
&&
498 !strcmp(drv1
->protocol_name
, protocol
)) {
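/* Illustrative examples (names are only for illustration): a spec such as
 * "nbd:localhost:10809" matches a driver that registered protocol_name "nbd",
 * while a plain path like "/var/lib/images/disk.qcow2" has no "<protocol>:"
 * prefix and falls back to the "file" protocol driver. */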
static int find_image_format(const char *filename, BlockDriver **pdrv)
{
    int ret, score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    BlockDriverState *bs;

    ret = bdrv_file_open(&bs, filename, 0);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs)) {
        bdrv_delete(bs);
        drv = bdrv_find_format("raw");
        if (!drv) {
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    bdrv_delete(bs);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
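/* bdrv_open_common() below takes one copy-on-read reference when an image is
 * opened with BDRV_O_RDWR and BDRV_O_COPY_ON_READ; other users (block jobs,
 * for example) may stack additional references on the same BlockDriverState. */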
622 * Common part for opening disk images and files
624 static int bdrv_open_common(BlockDriverState
*bs
, const char *filename
,
625 int flags
, BlockDriver
*drv
)
630 assert(bs
->file
== NULL
);
632 trace_bdrv_open_common(bs
, filename
, flags
, drv
->format_name
);
634 bs
->open_flags
= flags
;
635 bs
->buffer_alignment
= 512;
637 assert(bs
->copy_on_read
== 0); /* bdrv_new() and bdrv_close() make it so */
638 if ((flags
& BDRV_O_RDWR
) && (flags
& BDRV_O_COPY_ON_READ
)) {
639 bdrv_enable_copy_on_read(bs
);
642 pstrcpy(bs
->filename
, sizeof(bs
->filename
), filename
);
644 if (use_bdrv_whitelist
&& !bdrv_is_whitelisted(drv
)) {
649 bs
->opaque
= g_malloc0(drv
->instance_size
);
651 bs
->enable_write_cache
= !!(flags
& BDRV_O_CACHE_WB
);
652 open_flags
= flags
| BDRV_O_CACHE_WB
;
655 * Clear flags that are internal to the block layer before opening the
658 open_flags
&= ~(BDRV_O_SNAPSHOT
| BDRV_O_NO_BACKING
);
661 * Snapshots should be writable.
663 if (bs
->is_temporary
) {
664 open_flags
|= BDRV_O_RDWR
;
667 bs
->keep_read_only
= bs
->read_only
= !(open_flags
& BDRV_O_RDWR
);
669 /* Open the image, either directly or using a protocol */
670 if (drv
->bdrv_file_open
) {
671 ret
= drv
->bdrv_file_open(bs
, filename
, open_flags
);
673 ret
= bdrv_file_open(&bs
->file
, filename
, open_flags
);
675 ret
= drv
->bdrv_open(bs
, open_flags
);
683 ret
= refresh_total_sectors(bs
, bs
->total_sectors
);
689 if (bs
->is_temporary
) {
697 bdrv_delete(bs
->file
);
707 * Opens a file using a protocol (file, host_device, nbd, ...)
709 int bdrv_file_open(BlockDriverState
**pbs
, const char *filename
, int flags
)
711 BlockDriverState
*bs
;
715 drv
= bdrv_find_protocol(filename
);
721 ret
= bdrv_open_common(bs
, filename
, flags
, drv
);
732 * Opens a disk image (raw, qcow2, vmdk, ...)
734 int bdrv_open(BlockDriverState
*bs
, const char *filename
, int flags
,
738 char tmp_filename
[PATH_MAX
];
740 if (flags
& BDRV_O_SNAPSHOT
) {
741 BlockDriverState
*bs1
;
744 BlockDriver
*bdrv_qcow2
;
745 QEMUOptionParameter
*options
;
746 char backing_filename
[PATH_MAX
];
748 /* if snapshot, we create a temporary backing file and open it
749 instead of opening 'filename' directly */
751 /* if there is a backing file, use it */
753 ret
= bdrv_open(bs1
, filename
, 0, drv
);
758 total_size
= bdrv_getlength(bs1
) & BDRV_SECTOR_MASK
;
760 if (bs1
->drv
&& bs1
->drv
->protocol_name
)
765 ret
= get_tmp_filename(tmp_filename
, sizeof(tmp_filename
));
770 /* Real path is meaningless for protocols */
772 snprintf(backing_filename
, sizeof(backing_filename
),
774 else if (!realpath(filename
, backing_filename
))
777 bdrv_qcow2
= bdrv_find_format("qcow2");
778 options
= parse_option_parameters("", bdrv_qcow2
->create_options
, NULL
);
780 set_option_parameter_int(options
, BLOCK_OPT_SIZE
, total_size
);
781 set_option_parameter(options
, BLOCK_OPT_BACKING_FILE
, backing_filename
);
783 set_option_parameter(options
, BLOCK_OPT_BACKING_FMT
,
787 ret
= bdrv_create(bdrv_qcow2
, tmp_filename
, options
);
788 free_option_parameters(options
);
793 filename
= tmp_filename
;
795 bs
->is_temporary
= 1;
798 /* Find the right image format driver */
800 ret
= find_image_format(filename
, &drv
);
804 goto unlink_and_fail
;
808 ret
= bdrv_open_common(bs
, filename
, flags
, drv
);
810 goto unlink_and_fail
;
813 /* If there is a backing file, use it */
814 if ((flags
& BDRV_O_NO_BACKING
) == 0 && bs
->backing_file
[0] != '\0') {
815 char backing_filename
[PATH_MAX
];
817 BlockDriver
*back_drv
= NULL
;
819 bs
->backing_hd
= bdrv_new("");
820 bdrv_get_full_backing_filename(bs
, backing_filename
,
821 sizeof(backing_filename
));
823 if (bs
->backing_format
[0] != '\0') {
824 back_drv
= bdrv_find_format(bs
->backing_format
);
827 /* backing files always opened read-only */
829 flags
& ~(BDRV_O_RDWR
| BDRV_O_SNAPSHOT
| BDRV_O_NO_BACKING
);
831 ret
= bdrv_open(bs
->backing_hd
, backing_filename
, back_flags
, back_drv
);
836 if (bs
->is_temporary
) {
837 bs
->backing_hd
->keep_read_only
= !(flags
& BDRV_O_RDWR
);
839 /* base image inherits from "parent" */
840 bs
->backing_hd
->keep_read_only
= bs
->keep_read_only
;
844 if (!bdrv_key_required(bs
)) {
845 bdrv_dev_change_media_cb(bs
, true);
848 /* throttling disk I/O limits */
849 if (bs
->io_limits_enabled
) {
850 bdrv_io_limits_enable(bs
);
856 if (bs
->is_temporary
) {
862 void bdrv_close(BlockDriverState
*bs
)
867 block_job_cancel_sync(bs
->job
);
871 if (bs
== bs_snapshots
) {
874 if (bs
->backing_hd
) {
875 bdrv_delete(bs
->backing_hd
);
876 bs
->backing_hd
= NULL
;
878 bs
->drv
->bdrv_close(bs
);
881 if (bs
->is_temporary
) {
882 unlink(bs
->filename
);
887 bs
->copy_on_read
= 0;
888 bs
->backing_file
[0] = '\0';
889 bs
->backing_format
[0] = '\0';
890 bs
->total_sectors
= 0;
896 if (bs
->file
!= NULL
) {
897 bdrv_delete(bs
->file
);
902 bdrv_dev_change_media_cb(bs
, false);
904 /*throttling disk I/O limits*/
905 if (bs
->io_limits_enabled
) {
906 bdrv_io_limits_disable(bs
);
910 void bdrv_close_all(void)
912 BlockDriverState
*bs
;
914 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
920 * Wait for pending requests to complete across all BlockDriverStates
922 * This function does not flush data to disk, use bdrv_flush_all() for that
923 * after calling this function.
925 * Note that completion of an asynchronous I/O operation can trigger any
926 * number of other I/O operations on other devices---for example a coroutine
927 * can be arbitrarily complex and a constant flow of I/O can come until the
928 * coroutine is complete. Because of this, it is not possible to have a
929 * function to drain a single device's I/O queue.
931 void bdrv_drain_all(void)
933 BlockDriverState
*bs
;
937 busy
= qemu_aio_wait();
939 /* FIXME: We do not have timer support here, so this is effectively
942 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
943 if (!qemu_co_queue_empty(&bs
->throttled_reqs
)) {
944 qemu_co_queue_restart_all(&bs
->throttled_reqs
);
950 /* If requests are still pending there is a bug somewhere */
951 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
952 assert(QLIST_EMPTY(&bs
->tracked_requests
));
953 assert(qemu_co_queue_empty(&bs
->throttled_reqs
));
957 /* make a BlockDriverState anonymous by removing from bdrv_state list.
958 Also, NULL terminate the device_name to prevent double remove */
959 void bdrv_make_anon(BlockDriverState
*bs
)
961 if (bs
->device_name
[0] != '\0') {
962 QTAILQ_REMOVE(&bdrv_states
, bs
, list
);
964 bs
->device_name
[0] = '\0';
967 static void bdrv_rebind(BlockDriverState
*bs
)
969 if (bs
->drv
&& bs
->drv
->bdrv_rebind
) {
970 bs
->drv
->bdrv_rebind(bs
);
974 static void bdrv_move_feature_fields(BlockDriverState
*bs_dest
,
975 BlockDriverState
*bs_src
)
977 /* move some fields that need to stay attached to the device */
978 bs_dest
->open_flags
= bs_src
->open_flags
;
981 bs_dest
->dev_ops
= bs_src
->dev_ops
;
982 bs_dest
->dev_opaque
= bs_src
->dev_opaque
;
983 bs_dest
->dev
= bs_src
->dev
;
984 bs_dest
->buffer_alignment
= bs_src
->buffer_alignment
;
985 bs_dest
->copy_on_read
= bs_src
->copy_on_read
;
987 bs_dest
->enable_write_cache
= bs_src
->enable_write_cache
;
989 /* i/o timing parameters */
990 bs_dest
->slice_time
= bs_src
->slice_time
;
991 bs_dest
->slice_start
= bs_src
->slice_start
;
992 bs_dest
->slice_end
= bs_src
->slice_end
;
993 bs_dest
->io_limits
= bs_src
->io_limits
;
994 bs_dest
->io_base
= bs_src
->io_base
;
995 bs_dest
->throttled_reqs
= bs_src
->throttled_reqs
;
996 bs_dest
->block_timer
= bs_src
->block_timer
;
997 bs_dest
->io_limits_enabled
= bs_src
->io_limits_enabled
;
1000 bs_dest
->on_read_error
= bs_src
->on_read_error
;
1001 bs_dest
->on_write_error
= bs_src
->on_write_error
;
1004 bs_dest
->iostatus_enabled
= bs_src
->iostatus_enabled
;
1005 bs_dest
->iostatus
= bs_src
->iostatus
;
1008 bs_dest
->dirty_count
= bs_src
->dirty_count
;
1009 bs_dest
->dirty_bitmap
= bs_src
->dirty_bitmap
;
1012 bs_dest
->in_use
= bs_src
->in_use
;
1013 bs_dest
->job
= bs_src
->job
;
1015 /* keep the same entry in bdrv_states */
1016 pstrcpy(bs_dest
->device_name
, sizeof(bs_dest
->device_name
),
1017 bs_src
->device_name
);
1018 bs_dest
->list
= bs_src
->list
;
1022 * Swap bs contents for two image chains while they are live,
1023 * while keeping required fields on the BlockDriverState that is
1024 * actually attached to a device.
1026 * This will modify the BlockDriverState fields, and swap contents
1027 * between bs_new and bs_old. Both bs_new and bs_old are modified.
1029 * bs_new is required to be anonymous.
1031 * This function does not create any image files.
1033 void bdrv_swap(BlockDriverState
*bs_new
, BlockDriverState
*bs_old
)
1035 BlockDriverState tmp
;
1037 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
1038 assert(bs_new
->device_name
[0] == '\0');
1039 assert(bs_new
->dirty_bitmap
== NULL
);
1040 assert(bs_new
->job
== NULL
);
1041 assert(bs_new
->dev
== NULL
);
1042 assert(bs_new
->in_use
== 0);
1043 assert(bs_new
->io_limits_enabled
== false);
1044 assert(bs_new
->block_timer
== NULL
);
1050 /* there are some fields that should not be swapped, move them back */
1051 bdrv_move_feature_fields(&tmp
, bs_old
);
1052 bdrv_move_feature_fields(bs_old
, bs_new
);
1053 bdrv_move_feature_fields(bs_new
, &tmp
);
1055 /* bs_new shouldn't be in bdrv_states even after the swap! */
1056 assert(bs_new
->device_name
[0] == '\0');
1058 /* Check a few fields that should remain attached to the device */
1059 assert(bs_new
->dev
== NULL
);
1060 assert(bs_new
->job
== NULL
);
1061 assert(bs_new
->in_use
== 0);
1062 assert(bs_new
->io_limits_enabled
== false);
1063 assert(bs_new
->block_timer
== NULL
);
1065 bdrv_rebind(bs_new
);
1066 bdrv_rebind(bs_old
);
1070 * Add new bs contents at the top of an image chain while the chain is
1071 * live, while keeping required fields on the top layer.
1073 * This will modify the BlockDriverState fields, and swap contents
1074 * between bs_new and bs_top. Both bs_new and bs_top are modified.
1076 * bs_new is required to be anonymous.
1078 * This function does not create any image files.
1080 void bdrv_append(BlockDriverState
*bs_new
, BlockDriverState
*bs_top
)
1082 bdrv_swap(bs_new
, bs_top
);
1084 /* The contents of 'tmp' will become bs_top, as we are
1085 * swapping bs_new and bs_top contents. */
1086 bs_top
->backing_hd
= bs_new
;
1087 bs_top
->open_flags
&= ~BDRV_O_NO_BACKING
;
1088 pstrcpy(bs_top
->backing_file
, sizeof(bs_top
->backing_file
),
1090 pstrcpy(bs_top
->backing_format
, sizeof(bs_top
->backing_format
),
1091 bs_new
->drv
? bs_new
->drv
->format_name
: "");
1094 void bdrv_delete(BlockDriverState
*bs
)
1098 assert(!bs
->in_use
);
1100 /* remove from list, if necessary */
1105 assert(bs
!= bs_snapshots
);
1109 int bdrv_attach_dev(BlockDriverState
*bs
, void *dev
)
1110 /* TODO change to DeviceState *dev when all users are qdevified */
1116 bdrv_iostatus_reset(bs
);
1120 /* TODO qdevified devices don't use this, remove when devices are qdevified */
1121 void bdrv_attach_dev_nofail(BlockDriverState
*bs
, void *dev
)
1123 if (bdrv_attach_dev(bs
, dev
) < 0) {
1128 void bdrv_detach_dev(BlockDriverState
*bs
, void *dev
)
1129 /* TODO change to DeviceState *dev when all users are qdevified */
1131 assert(bs
->dev
== dev
);
1134 bs
->dev_opaque
= NULL
;
1135 bs
->buffer_alignment
= 512;
1138 /* TODO change to return DeviceState * when all users are qdevified */
1139 void *bdrv_get_attached_dev(BlockDriverState
*bs
)
1144 void bdrv_set_dev_ops(BlockDriverState
*bs
, const BlockDevOps
*ops
,
1148 bs
->dev_opaque
= opaque
;
1149 if (bdrv_dev_has_removable_media(bs
) && bs
== bs_snapshots
) {
1150 bs_snapshots
= NULL
;
1154 void bdrv_emit_qmp_error_event(const BlockDriverState
*bdrv
,
1155 BlockQMPEventAction action
, int is_read
)
1158 const char *action_str
;
1161 case BDRV_ACTION_REPORT
:
1162 action_str
= "report";
1164 case BDRV_ACTION_IGNORE
:
1165 action_str
= "ignore";
1167 case BDRV_ACTION_STOP
:
1168 action_str
= "stop";
1174 data
= qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1177 is_read
? "read" : "write");
1178 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR
, data
);
1180 qobject_decref(data
);
1183 static void bdrv_emit_qmp_eject_event(BlockDriverState
*bs
, bool ejected
)
1187 data
= qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
1188 bdrv_get_device_name(bs
), ejected
);
1189 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED
, data
);
1191 qobject_decref(data
);
1194 static void bdrv_dev_change_media_cb(BlockDriverState
*bs
, bool load
)
1196 if (bs
->dev_ops
&& bs
->dev_ops
->change_media_cb
) {
1197 bool tray_was_closed
= !bdrv_dev_is_tray_open(bs
);
1198 bs
->dev_ops
->change_media_cb(bs
->dev_opaque
, load
);
1199 if (tray_was_closed
) {
1201 bdrv_emit_qmp_eject_event(bs
, true);
1205 bdrv_emit_qmp_eject_event(bs
, false);
1210 bool bdrv_dev_has_removable_media(BlockDriverState
*bs
)
1212 return !bs
->dev
|| (bs
->dev_ops
&& bs
->dev_ops
->change_media_cb
);
1215 void bdrv_dev_eject_request(BlockDriverState
*bs
, bool force
)
1217 if (bs
->dev_ops
&& bs
->dev_ops
->eject_request_cb
) {
1218 bs
->dev_ops
->eject_request_cb(bs
->dev_opaque
, force
);
1222 bool bdrv_dev_is_tray_open(BlockDriverState
*bs
)
1224 if (bs
->dev_ops
&& bs
->dev_ops
->is_tray_open
) {
1225 return bs
->dev_ops
->is_tray_open(bs
->dev_opaque
);
1230 static void bdrv_dev_resize_cb(BlockDriverState
*bs
)
1232 if (bs
->dev_ops
&& bs
->dev_ops
->resize_cb
) {
1233 bs
->dev_ops
->resize_cb(bs
->dev_opaque
);
1237 bool bdrv_dev_is_medium_locked(BlockDriverState
*bs
)
1239 if (bs
->dev_ops
&& bs
->dev_ops
->is_medium_locked
) {
1240 return bs
->dev_ops
->is_medium_locked(bs
->dev_opaque
);
1246 * Run consistency checks on an image
1248 * Returns 0 if the check could be completed (it doesn't mean that the image is
1249 * free of errors) or -errno when an internal error occurred. The results of the
1250 * check are stored in res.
1252 int bdrv_check(BlockDriverState
*bs
, BdrvCheckResult
*res
, BdrvCheckMode fix
)
1254 if (bs
->drv
->bdrv_check
== NULL
) {
1258 memset(res
, 0, sizeof(*res
));
1259 return bs
->drv
->bdrv_check(bs
, res
, fix
);
1262 #define COMMIT_BUF_SECTORS 2048
1264 /* commit COW file into the raw image */
1265 int bdrv_commit(BlockDriverState
*bs
)
1267 BlockDriver
*drv
= bs
->drv
;
1268 BlockDriver
*backing_drv
;
1269 int64_t sector
, total_sectors
;
1270 int n
, ro
, open_flags
;
1271 int ret
= 0, rw_ret
= 0;
1273 char filename
[1024];
1274 BlockDriverState
*bs_rw
, *bs_ro
;
1279 if (!bs
->backing_hd
) {
1283 if (bs
->backing_hd
->keep_read_only
) {
1287 if (bdrv_in_use(bs
) || bdrv_in_use(bs
->backing_hd
)) {
1291 backing_drv
= bs
->backing_hd
->drv
;
1292 ro
= bs
->backing_hd
->read_only
;
1293 strncpy(filename
, bs
->backing_hd
->filename
, sizeof(filename
));
1294 open_flags
= bs
->backing_hd
->open_flags
;
1298 bdrv_delete(bs
->backing_hd
);
1299 bs
->backing_hd
= NULL
;
1300 bs_rw
= bdrv_new("");
1301 rw_ret
= bdrv_open(bs_rw
, filename
, open_flags
| BDRV_O_RDWR
,
1305 /* try to re-open read-only */
1306 bs_ro
= bdrv_new("");
1307 ret
= bdrv_open(bs_ro
, filename
, open_flags
& ~BDRV_O_RDWR
,
1311 /* drive not functional anymore */
1315 bs
->backing_hd
= bs_ro
;
1318 bs
->backing_hd
= bs_rw
;
1321 total_sectors
= bdrv_getlength(bs
) >> BDRV_SECTOR_BITS
;
1322 buf
= g_malloc(COMMIT_BUF_SECTORS
* BDRV_SECTOR_SIZE
);
1324 for (sector
= 0; sector
< total_sectors
; sector
+= n
) {
1325 if (bdrv_is_allocated(bs
, sector
, COMMIT_BUF_SECTORS
, &n
)) {
1327 if (bdrv_read(bs
, sector
, buf
, n
) != 0) {
1332 if (bdrv_write(bs
->backing_hd
, sector
, buf
, n
) != 0) {
1339 if (drv
->bdrv_make_empty
) {
1340 ret
= drv
->bdrv_make_empty(bs
);
1345 * Make sure all data we wrote to the backing device is actually
1349 bdrv_flush(bs
->backing_hd
);
1356 bdrv_delete(bs
->backing_hd
);
1357 bs
->backing_hd
= NULL
;
1358 bs_ro
= bdrv_new("");
1359 ret
= bdrv_open(bs_ro
, filename
, open_flags
& ~BDRV_O_RDWR
,
1363 /* drive not functional anymore */
1367 bs
->backing_hd
= bs_ro
;
1368 bs
->backing_hd
->keep_read_only
= 0;
1374 int bdrv_commit_all(void)
1376 BlockDriverState
*bs
;
1378 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
1379 int ret
= bdrv_commit(bs
);
1387 struct BdrvTrackedRequest
{
1388 BlockDriverState
*bs
;
1392 QLIST_ENTRY(BdrvTrackedRequest
) list
;
1393 Coroutine
*co
; /* owner, used for deadlock detection */
1394 CoQueue wait_queue
; /* coroutines blocked on this request */
1398 * Remove an active request from the tracked requests list
1400 * This function should be called when a tracked request is completing.
1402 static void tracked_request_end(BdrvTrackedRequest
*req
)
1404 QLIST_REMOVE(req
, list
);
1405 qemu_co_queue_restart_all(&req
->wait_queue
);
1409 * Add an active request to the tracked requests list
1411 static void tracked_request_begin(BdrvTrackedRequest
*req
,
1412 BlockDriverState
*bs
,
1414 int nb_sectors
, bool is_write
)
1416 *req
= (BdrvTrackedRequest
){
1418 .sector_num
= sector_num
,
1419 .nb_sectors
= nb_sectors
,
1420 .is_write
= is_write
,
1421 .co
= qemu_coroutine_self(),
1424 qemu_co_queue_init(&req
->wait_queue
);
1426 QLIST_INSERT_HEAD(&bs
->tracked_requests
, req
, list
);
1430 * Round a region to cluster boundaries
1432 static void round_to_clusters(BlockDriverState
*bs
,
1433 int64_t sector_num
, int nb_sectors
,
1434 int64_t *cluster_sector_num
,
1435 int *cluster_nb_sectors
)
1437 BlockDriverInfo bdi
;
1439 if (bdrv_get_info(bs
, &bdi
) < 0 || bdi
.cluster_size
== 0) {
1440 *cluster_sector_num
= sector_num
;
1441 *cluster_nb_sectors
= nb_sectors
;
1443 int64_t c
= bdi
.cluster_size
/ BDRV_SECTOR_SIZE
;
1444 *cluster_sector_num
= QEMU_ALIGN_DOWN(sector_num
, c
);
1445 *cluster_nb_sectors
= QEMU_ALIGN_UP(sector_num
- *cluster_sector_num
+
1450 static bool tracked_request_overlaps(BdrvTrackedRequest
*req
,
1451 int64_t sector_num
, int nb_sectors
) {
1453 if (sector_num
>= req
->sector_num
+ req
->nb_sectors
) {
1457 if (req
->sector_num
>= sector_num
+ nb_sectors
) {
1463 static void coroutine_fn
wait_for_overlapping_requests(BlockDriverState
*bs
,
1464 int64_t sector_num
, int nb_sectors
)
1466 BdrvTrackedRequest
*req
;
1467 int64_t cluster_sector_num
;
1468 int cluster_nb_sectors
;
1471 /* If we touch the same cluster it counts as an overlap. This guarantees
1472 * that allocating writes will be serialized and not race with each other
1473 * for the same cluster. For example, in copy-on-read it ensures that the
1474 * CoR read and write operations are atomic and guest writes cannot
1475 * interleave between them.
1477 round_to_clusters(bs
, sector_num
, nb_sectors
,
1478 &cluster_sector_num
, &cluster_nb_sectors
);
1482 QLIST_FOREACH(req
, &bs
->tracked_requests
, list
) {
1483 if (tracked_request_overlaps(req
, cluster_sector_num
,
1484 cluster_nb_sectors
)) {
1485 /* Hitting this means there was a reentrant request, for
1486 * example, a block driver issuing nested requests. This must
1487 * never happen since it means deadlock.
1489 assert(qemu_coroutine_self() != req
->co
);
1491 qemu_co_queue_wait(&req
->wait_queue
);
1502 * -EINVAL - backing format specified, but no file
1503 * -ENOSPC - can't update the backing file because no space is left in the
1505 * -ENOTSUP - format driver doesn't support changing the backing file
1507 int bdrv_change_backing_file(BlockDriverState
*bs
,
1508 const char *backing_file
, const char *backing_fmt
)
1510 BlockDriver
*drv
= bs
->drv
;
1513 /* Backing file format doesn't make sense without a backing file */
1514 if (backing_fmt
&& !backing_file
) {
1518 if (drv
->bdrv_change_backing_file
!= NULL
) {
1519 ret
= drv
->bdrv_change_backing_file(bs
, backing_file
, backing_fmt
);
1525 pstrcpy(bs
->backing_file
, sizeof(bs
->backing_file
), backing_file
?: "");
1526 pstrcpy(bs
->backing_format
, sizeof(bs
->backing_format
), backing_fmt
?: "");
1531 static int bdrv_check_byte_request(BlockDriverState
*bs
, int64_t offset
,
1536 if (!bdrv_is_inserted(bs
))
1542 len
= bdrv_getlength(bs
);
1547 if ((offset
> len
) || (len
- offset
< size
))
1553 static int bdrv_check_request(BlockDriverState
*bs
, int64_t sector_num
,
1556 return bdrv_check_byte_request(bs
, sector_num
* BDRV_SECTOR_SIZE
,
1557 nb_sectors
* BDRV_SECTOR_SIZE
);
1560 typedef struct RwCo
{
1561 BlockDriverState
*bs
;
1569 static void coroutine_fn
bdrv_rw_co_entry(void *opaque
)
1571 RwCo
*rwco
= opaque
;
1573 if (!rwco
->is_write
) {
1574 rwco
->ret
= bdrv_co_do_readv(rwco
->bs
, rwco
->sector_num
,
1575 rwco
->nb_sectors
, rwco
->qiov
, 0);
1577 rwco
->ret
= bdrv_co_do_writev(rwco
->bs
, rwco
->sector_num
,
1578 rwco
->nb_sectors
, rwco
->qiov
, 0);
1583 * Process a synchronous request using coroutines
1585 static int bdrv_rw_co(BlockDriverState
*bs
, int64_t sector_num
, uint8_t *buf
,
1586 int nb_sectors
, bool is_write
)
1589 struct iovec iov
= {
1590 .iov_base
= (void *)buf
,
1591 .iov_len
= nb_sectors
* BDRV_SECTOR_SIZE
,
1596 .sector_num
= sector_num
,
1597 .nb_sectors
= nb_sectors
,
1599 .is_write
= is_write
,
1603 qemu_iovec_init_external(&qiov
, &iov
, 1);
1606 * In sync call context, when the vcpu is blocked, this throttling timer
1607 * will not fire; so the I/O throttling function has to be disabled here
1608 * if it has been enabled.
1610 if (bs
->io_limits_enabled
) {
1611 fprintf(stderr
, "Disabling I/O throttling on '%s' due "
1612 "to synchronous I/O.\n", bdrv_get_device_name(bs
));
1613 bdrv_io_limits_disable(bs
);
1616 if (qemu_in_coroutine()) {
1617 /* Fast-path if already in coroutine context */
1618 bdrv_rw_co_entry(&rwco
);
1620 co
= qemu_coroutine_create(bdrv_rw_co_entry
);
1621 qemu_coroutine_enter(co
, &rwco
);
1622 while (rwco
.ret
== NOT_DONE
) {
1629 /* return < 0 if error. See bdrv_write() for the return codes */
1630 int bdrv_read(BlockDriverState
*bs
, int64_t sector_num
,
1631 uint8_t *buf
, int nb_sectors
)
1633 return bdrv_rw_co(bs
, sector_num
, buf
, nb_sectors
, false);
1636 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
1637 int bdrv_read_unthrottled(BlockDriverState
*bs
, int64_t sector_num
,
1638 uint8_t *buf
, int nb_sectors
)
1643 enabled
= bs
->io_limits_enabled
;
1644 bs
->io_limits_enabled
= false;
1645 ret
= bdrv_read(bs
, 0, buf
, 1);
1646 bs
->io_limits_enabled
= enabled
;
1650 #define BITS_PER_LONG (sizeof(unsigned long) * 8)
1652 static void set_dirty_bitmap(BlockDriverState
*bs
, int64_t sector_num
,
1653 int nb_sectors
, int dirty
)
1656 unsigned long val
, idx
, bit
;
1658 start
= sector_num
/ BDRV_SECTORS_PER_DIRTY_CHUNK
;
1659 end
= (sector_num
+ nb_sectors
- 1) / BDRV_SECTORS_PER_DIRTY_CHUNK
;
1661 for (; start
<= end
; start
++) {
1662 idx
= start
/ BITS_PER_LONG
;
1663 bit
= start
% BITS_PER_LONG
;
1664 val
= bs
->dirty_bitmap
[idx
];
1666 if (!(val
& (1UL << bit
))) {
1671 if (val
& (1UL << bit
)) {
1673 val
&= ~(1UL << bit
);
1676 bs
->dirty_bitmap
[idx
] = val
;
1680 /* Return < 0 if error. Important errors are:
1681 -EIO generic I/O error (may happen for all errors)
1682 -ENOMEDIUM No media inserted.
1683 -EINVAL Invalid sector number or nb_sectors
1684 -EACCES Trying to write a read-only device
1686 int bdrv_write(BlockDriverState
*bs
, int64_t sector_num
,
1687 const uint8_t *buf
, int nb_sectors
)
1689 return bdrv_rw_co(bs
, sector_num
, (uint8_t *)buf
, nb_sectors
, true);
1692 int bdrv_pread(BlockDriverState
*bs
, int64_t offset
,
1693 void *buf
, int count1
)
1695 uint8_t tmp_buf
[BDRV_SECTOR_SIZE
];
1696 int len
, nb_sectors
, count
;
1701 /* first read to align to sector start */
1702 len
= (BDRV_SECTOR_SIZE
- offset
) & (BDRV_SECTOR_SIZE
- 1);
1705 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1707 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1709 memcpy(buf
, tmp_buf
+ (offset
& (BDRV_SECTOR_SIZE
- 1)), len
);
1717 /* read the sectors "in place" */
1718 nb_sectors
= count
>> BDRV_SECTOR_BITS
;
1719 if (nb_sectors
> 0) {
1720 if ((ret
= bdrv_read(bs
, sector_num
, buf
, nb_sectors
)) < 0)
1722 sector_num
+= nb_sectors
;
1723 len
= nb_sectors
<< BDRV_SECTOR_BITS
;
1728 /* add data from the last sector */
1730 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1732 memcpy(buf
, tmp_buf
, count
);
1737 int bdrv_pwrite(BlockDriverState
*bs
, int64_t offset
,
1738 const void *buf
, int count1
)
1740 uint8_t tmp_buf
[BDRV_SECTOR_SIZE
];
1741 int len
, nb_sectors
, count
;
1746 /* first write to align to sector start */
1747 len
= (BDRV_SECTOR_SIZE
- offset
) & (BDRV_SECTOR_SIZE
- 1);
1750 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1752 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1754 memcpy(tmp_buf
+ (offset
& (BDRV_SECTOR_SIZE
- 1)), buf
, len
);
1755 if ((ret
= bdrv_write(bs
, sector_num
, tmp_buf
, 1)) < 0)
1764 /* write the sectors "in place" */
1765 nb_sectors
= count
>> BDRV_SECTOR_BITS
;
1766 if (nb_sectors
> 0) {
1767 if ((ret
= bdrv_write(bs
, sector_num
, buf
, nb_sectors
)) < 0)
1769 sector_num
+= nb_sectors
;
1770 len
= nb_sectors
<< BDRV_SECTOR_BITS
;
1775 /* add data from the last sector */
1777 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1779 memcpy(tmp_buf
, buf
, count
);
1780 if ((ret
= bdrv_write(bs
, sector_num
, tmp_buf
, 1)) < 0)
1787 * Writes to the file and ensures that no writes are reordered across this
1788 * request (acts as a barrier)
1790 * Returns 0 on success, -errno in error cases.
1792 int bdrv_pwrite_sync(BlockDriverState
*bs
, int64_t offset
,
1793 const void *buf
, int count
)
1797 ret
= bdrv_pwrite(bs
, offset
, buf
, count
);
1802 /* No flush needed for cache modes that already do it */
1803 if (bs
->enable_write_cache
) {
1810 static int coroutine_fn
bdrv_co_do_copy_on_readv(BlockDriverState
*bs
,
1811 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
1813 /* Perform I/O through a temporary buffer so that users who scribble over
1814 * their read buffer while the operation is in progress do not end up
1815 * modifying the image file. This is critical for zero-copy guest I/O
1816 * where anything might happen inside guest memory.
1818 void *bounce_buffer
;
1820 BlockDriver
*drv
= bs
->drv
;
1822 QEMUIOVector bounce_qiov
;
1823 int64_t cluster_sector_num
;
1824 int cluster_nb_sectors
;
1828 /* Cover entire cluster so no additional backing file I/O is required when
1829 * allocating cluster in the image file.
1831 round_to_clusters(bs
, sector_num
, nb_sectors
,
1832 &cluster_sector_num
, &cluster_nb_sectors
);
1834 trace_bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
,
1835 cluster_sector_num
, cluster_nb_sectors
);
1837 iov
.iov_len
= cluster_nb_sectors
* BDRV_SECTOR_SIZE
;
1838 iov
.iov_base
= bounce_buffer
= qemu_blockalign(bs
, iov
.iov_len
);
1839 qemu_iovec_init_external(&bounce_qiov
, &iov
, 1);
1841 ret
= drv
->bdrv_co_readv(bs
, cluster_sector_num
, cluster_nb_sectors
,
1847 if (drv
->bdrv_co_write_zeroes
&&
1848 buffer_is_zero(bounce_buffer
, iov
.iov_len
)) {
1849 ret
= bdrv_co_do_write_zeroes(bs
, cluster_sector_num
,
1850 cluster_nb_sectors
);
1852 /* This does not change the data on the disk, it is not necessary
1853 * to flush even in cache=writethrough mode.
1855 ret
= drv
->bdrv_co_writev(bs
, cluster_sector_num
, cluster_nb_sectors
,
1860 /* It might be okay to ignore write errors for guest requests. If this
1861 * is a deliberate copy-on-read then we don't want to ignore the error.
1862 * Simply report it in all cases.
1867 skip_bytes
= (sector_num
- cluster_sector_num
) * BDRV_SECTOR_SIZE
;
1868 qemu_iovec_from_buf(qiov
, 0, bounce_buffer
+ skip_bytes
,
1869 nb_sectors
* BDRV_SECTOR_SIZE
);
1872 qemu_vfree(bounce_buffer
);
1877 * Handle a read request in coroutine context
1879 static int coroutine_fn
bdrv_co_do_readv(BlockDriverState
*bs
,
1880 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1881 BdrvRequestFlags flags
)
1883 BlockDriver
*drv
= bs
->drv
;
1884 BdrvTrackedRequest req
;
1890 if (bdrv_check_request(bs
, sector_num
, nb_sectors
)) {
1894 /* throttling disk read I/O */
1895 if (bs
->io_limits_enabled
) {
1896 bdrv_io_limits_intercept(bs
, false, nb_sectors
);
1899 if (bs
->copy_on_read
) {
1900 flags
|= BDRV_REQ_COPY_ON_READ
;
1902 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1903 bs
->copy_on_read_in_flight
++;
1906 if (bs
->copy_on_read_in_flight
) {
1907 wait_for_overlapping_requests(bs
, sector_num
, nb_sectors
);
1910 tracked_request_begin(&req
, bs
, sector_num
, nb_sectors
, false);
1912 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1915 ret
= bdrv_co_is_allocated(bs
, sector_num
, nb_sectors
, &pnum
);
1920 if (!ret
|| pnum
!= nb_sectors
) {
1921 ret
= bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
, qiov
);
1926 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
1929 tracked_request_end(&req
);
1931 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1932 bs
->copy_on_read_in_flight
--;
1938 int coroutine_fn
bdrv_co_readv(BlockDriverState
*bs
, int64_t sector_num
,
1939 int nb_sectors
, QEMUIOVector
*qiov
)
1941 trace_bdrv_co_readv(bs
, sector_num
, nb_sectors
);
1943 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
, 0);
1946 int coroutine_fn
bdrv_co_copy_on_readv(BlockDriverState
*bs
,
1947 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
1949 trace_bdrv_co_copy_on_readv(bs
, sector_num
, nb_sectors
);
1951 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
,
1952 BDRV_REQ_COPY_ON_READ
);
1955 static int coroutine_fn
bdrv_co_do_write_zeroes(BlockDriverState
*bs
,
1956 int64_t sector_num
, int nb_sectors
)
1958 BlockDriver
*drv
= bs
->drv
;
1963 /* TODO Emulate only part of misaligned requests instead of letting block
1964 * drivers return -ENOTSUP and emulate everything */
1966 /* First try the efficient write zeroes operation */
1967 if (drv
->bdrv_co_write_zeroes
) {
1968 ret
= drv
->bdrv_co_write_zeroes(bs
, sector_num
, nb_sectors
);
1969 if (ret
!= -ENOTSUP
) {
1974 /* Fall back to bounce buffer if write zeroes is unsupported */
1975 iov
.iov_len
= nb_sectors
* BDRV_SECTOR_SIZE
;
1976 iov
.iov_base
= qemu_blockalign(bs
, iov
.iov_len
);
1977 memset(iov
.iov_base
, 0, iov
.iov_len
);
1978 qemu_iovec_init_external(&qiov
, &iov
, 1);
1980 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, &qiov
);
1982 qemu_vfree(iov
.iov_base
);
1987 * Handle a write request in coroutine context
1989 static int coroutine_fn
bdrv_co_do_writev(BlockDriverState
*bs
,
1990 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1991 BdrvRequestFlags flags
)
1993 BlockDriver
*drv
= bs
->drv
;
1994 BdrvTrackedRequest req
;
2000 if (bs
->read_only
) {
2003 if (bdrv_check_request(bs
, sector_num
, nb_sectors
)) {
2007 /* throttling disk write I/O */
2008 if (bs
->io_limits_enabled
) {
2009 bdrv_io_limits_intercept(bs
, true, nb_sectors
);
2012 if (bs
->copy_on_read_in_flight
) {
2013 wait_for_overlapping_requests(bs
, sector_num
, nb_sectors
);
2016 tracked_request_begin(&req
, bs
, sector_num
, nb_sectors
, true);
2018 if (flags
& BDRV_REQ_ZERO_WRITE
) {
2019 ret
= bdrv_co_do_write_zeroes(bs
, sector_num
, nb_sectors
);
2021 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, qiov
);
2024 if (ret
== 0 && !bs
->enable_write_cache
) {
2025 ret
= bdrv_co_flush(bs
);
2028 if (bs
->dirty_bitmap
) {
2029 set_dirty_bitmap(bs
, sector_num
, nb_sectors
, 1);
2032 if (bs
->wr_highest_sector
< sector_num
+ nb_sectors
- 1) {
2033 bs
->wr_highest_sector
= sector_num
+ nb_sectors
- 1;
2036 tracked_request_end(&req
);
2041 int coroutine_fn
bdrv_co_writev(BlockDriverState
*bs
, int64_t sector_num
,
2042 int nb_sectors
, QEMUIOVector
*qiov
)
2044 trace_bdrv_co_writev(bs
, sector_num
, nb_sectors
);
2046 return bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, qiov
, 0);
2049 int coroutine_fn
bdrv_co_write_zeroes(BlockDriverState
*bs
,
2050 int64_t sector_num
, int nb_sectors
)
2052 trace_bdrv_co_write_zeroes(bs
, sector_num
, nb_sectors
);
2054 return bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, NULL
,
2055 BDRV_REQ_ZERO_WRITE
);
2059 * Truncate file to 'offset' bytes (needed only for file protocols)
2061 int bdrv_truncate(BlockDriverState
*bs
, int64_t offset
)
2063 BlockDriver
*drv
= bs
->drv
;
2067 if (!drv
->bdrv_truncate
)
2071 if (bdrv_in_use(bs
))
2073 ret
= drv
->bdrv_truncate(bs
, offset
);
2075 ret
= refresh_total_sectors(bs
, offset
>> BDRV_SECTOR_BITS
);
2076 bdrv_dev_resize_cb(bs
);
2082 * Length of a allocated file in bytes. Sparse files are counted by actual
2083 * allocated space. Return < 0 if error or unknown.
2085 int64_t bdrv_get_allocated_file_size(BlockDriverState
*bs
)
2087 BlockDriver
*drv
= bs
->drv
;
2091 if (drv
->bdrv_get_allocated_file_size
) {
2092 return drv
->bdrv_get_allocated_file_size(bs
);
2095 return bdrv_get_allocated_file_size(bs
->file
);
2101 * Length of a file in bytes. Return < 0 if error or unknown.
2103 int64_t bdrv_getlength(BlockDriverState
*bs
)
2105 BlockDriver
*drv
= bs
->drv
;
2109 if (bs
->growable
|| bdrv_dev_has_removable_media(bs
)) {
2110 if (drv
->bdrv_getlength
) {
2111 return drv
->bdrv_getlength(bs
);
2114 return bs
->total_sectors
* BDRV_SECTOR_SIZE
;
2117 /* return 0 as number of sectors if no device present or error */
2118 void bdrv_get_geometry(BlockDriverState
*bs
, uint64_t *nb_sectors_ptr
)
2121 length
= bdrv_getlength(bs
);
2125 length
= length
>> BDRV_SECTOR_BITS
;
2126 *nb_sectors_ptr
= length
;
2129 /* throttling disk io limits */
2130 void bdrv_set_io_limits(BlockDriverState
*bs
,
2131 BlockIOLimit
*io_limits
)
2133 bs
->io_limits
= *io_limits
;
2134 bs
->io_limits_enabled
= bdrv_io_limits_enabled(bs
);
2137 void bdrv_set_on_error(BlockDriverState
*bs
, BlockErrorAction on_read_error
,
2138 BlockErrorAction on_write_error
)
2140 bs
->on_read_error
= on_read_error
;
2141 bs
->on_write_error
= on_write_error
;
2144 BlockErrorAction
bdrv_get_on_error(BlockDriverState
*bs
, int is_read
)
2146 return is_read
? bs
->on_read_error
: bs
->on_write_error
;
2149 int bdrv_is_read_only(BlockDriverState
*bs
)
2151 return bs
->read_only
;
2154 int bdrv_is_sg(BlockDriverState
*bs
)
2159 int bdrv_enable_write_cache(BlockDriverState
*bs
)
2161 return bs
->enable_write_cache
;
2164 void bdrv_set_enable_write_cache(BlockDriverState
*bs
, bool wce
)
2166 bs
->enable_write_cache
= wce
;
2169 int bdrv_is_encrypted(BlockDriverState
*bs
)
2171 if (bs
->backing_hd
&& bs
->backing_hd
->encrypted
)
2173 return bs
->encrypted
;
2176 int bdrv_key_required(BlockDriverState
*bs
)
2178 BlockDriverState
*backing_hd
= bs
->backing_hd
;
2180 if (backing_hd
&& backing_hd
->encrypted
&& !backing_hd
->valid_key
)
2182 return (bs
->encrypted
&& !bs
->valid_key
);
2185 int bdrv_set_key(BlockDriverState
*bs
, const char *key
)
2188 if (bs
->backing_hd
&& bs
->backing_hd
->encrypted
) {
2189 ret
= bdrv_set_key(bs
->backing_hd
, key
);
2195 if (!bs
->encrypted
) {
2197 } else if (!bs
->drv
|| !bs
->drv
->bdrv_set_key
) {
2200 ret
= bs
->drv
->bdrv_set_key(bs
, key
);
2203 } else if (!bs
->valid_key
) {
2205 /* call the change callback now, we skipped it on open */
2206 bdrv_dev_change_media_cb(bs
, true);
2211 const char *bdrv_get_format_name(BlockDriverState
*bs
)
2213 return bs
->drv
? bs
->drv
->format_name
: NULL
;
2216 void bdrv_iterate_format(void (*it
)(void *opaque
, const char *name
),
2221 QLIST_FOREACH(drv
, &bdrv_drivers
, list
) {
2222 it(opaque
, drv
->format_name
);
2226 BlockDriverState
*bdrv_find(const char *name
)
2228 BlockDriverState
*bs
;
2230 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2231 if (!strcmp(name
, bs
->device_name
)) {
2238 BlockDriverState
*bdrv_next(BlockDriverState
*bs
)
2241 return QTAILQ_FIRST(&bdrv_states
);
2243 return QTAILQ_NEXT(bs
, list
);
2246 void bdrv_iterate(void (*it
)(void *opaque
, BlockDriverState
*bs
), void *opaque
)
2248 BlockDriverState
*bs
;
2250 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2255 const char *bdrv_get_device_name(BlockDriverState
*bs
)
2257 return bs
->device_name
;
2260 int bdrv_get_flags(BlockDriverState
*bs
)
2262 return bs
->open_flags
;
2265 void bdrv_flush_all(void)
2267 BlockDriverState
*bs
;
2269 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2274 int bdrv_has_zero_init(BlockDriverState
*bs
)
2278 if (bs
->drv
->bdrv_has_zero_init
) {
2279 return bs
->drv
->bdrv_has_zero_init(bs
);
2285 typedef struct BdrvCoIsAllocatedData
{
2286 BlockDriverState
*bs
;
2292 } BdrvCoIsAllocatedData
;
2295 * Returns true iff the specified sector is present in the disk image. Drivers
2296 * not implementing the functionality are assumed to not support backing files,
2297 * hence all their sectors are reported as allocated.
2299 * If 'sector_num' is beyond the end of the disk image the return value is 0
2300 * and 'pnum' is set to 0.
2302 * 'pnum' is set to the number of sectors (including and immediately following
2303 * the specified sector) that are known to be in the same
2304 * allocated/unallocated state.
2306 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2307 * beyond the end of the disk image it will be clamped.
2309 int coroutine_fn
bdrv_co_is_allocated(BlockDriverState
*bs
, int64_t sector_num
,
2310 int nb_sectors
, int *pnum
)
2314 if (sector_num
>= bs
->total_sectors
) {
2319 n
= bs
->total_sectors
- sector_num
;
2320 if (n
< nb_sectors
) {
2324 if (!bs
->drv
->bdrv_co_is_allocated
) {
2329 return bs
->drv
->bdrv_co_is_allocated(bs
, sector_num
, nb_sectors
, pnum
);
2332 /* Coroutine wrapper for bdrv_is_allocated() */
2333 static void coroutine_fn
bdrv_is_allocated_co_entry(void *opaque
)
2335 BdrvCoIsAllocatedData
*data
= opaque
;
2336 BlockDriverState
*bs
= data
->bs
;
2338 data
->ret
= bdrv_co_is_allocated(bs
, data
->sector_num
, data
->nb_sectors
,
2344 * Synchronous wrapper around bdrv_co_is_allocated().
2346 * See bdrv_co_is_allocated() for details.
2348 int bdrv_is_allocated(BlockDriverState
*bs
, int64_t sector_num
, int nb_sectors
,
2352 BdrvCoIsAllocatedData data
= {
2354 .sector_num
= sector_num
,
2355 .nb_sectors
= nb_sectors
,
2360 co
= qemu_coroutine_create(bdrv_is_allocated_co_entry
);
2361 qemu_coroutine_enter(co
, &data
);
2362 while (!data
.done
) {
2369 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2371 * Return true if the given sector is allocated in any image between
2372 * BASE and TOP (inclusive). BASE can be NULL to check if the given
2373 * sector is allocated in any image of the chain. Return false otherwise.
2375 * 'pnum' is set to the number of sectors (including and immediately following
2376 * the specified sector) that are known to be in the same
2377 * allocated/unallocated state.
2380 int coroutine_fn
bdrv_co_is_allocated_above(BlockDriverState
*top
,
2381 BlockDriverState
*base
,
2383 int nb_sectors
, int *pnum
)
2385 BlockDriverState
*intermediate
;
2386 int ret
, n
= nb_sectors
;
2389 while (intermediate
&& intermediate
!= base
) {
2391 ret
= bdrv_co_is_allocated(intermediate
, sector_num
, nb_sectors
,
2401 * [sector_num, nb_sectors] is unallocated on top but intermediate
2404 * [sector_num+x, nr_sectors] allocated.
2406 if (n
> pnum_inter
) {
2410 intermediate
= intermediate
->backing_hd
;
2417 BlockInfoList
*qmp_query_block(Error
**errp
)
2419 BlockInfoList
*head
= NULL
, *cur_item
= NULL
;
2420 BlockDriverState
*bs
;
2422 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2423 BlockInfoList
*info
= g_malloc0(sizeof(*info
));
2425 info
->value
= g_malloc0(sizeof(*info
->value
));
2426 info
->value
->device
= g_strdup(bs
->device_name
);
2427 info
->value
->type
= g_strdup("unknown");
2428 info
->value
->locked
= bdrv_dev_is_medium_locked(bs
);
2429 info
->value
->removable
= bdrv_dev_has_removable_media(bs
);
2431 if (bdrv_dev_has_removable_media(bs
)) {
2432 info
->value
->has_tray_open
= true;
2433 info
->value
->tray_open
= bdrv_dev_is_tray_open(bs
);
2436 if (bdrv_iostatus_is_enabled(bs
)) {
2437 info
->value
->has_io_status
= true;
2438 info
->value
->io_status
= bs
->iostatus
;
2442 info
->value
->has_inserted
= true;
2443 info
->value
->inserted
= g_malloc0(sizeof(*info
->value
->inserted
));
2444 info
->value
->inserted
->file
= g_strdup(bs
->filename
);
2445 info
->value
->inserted
->ro
= bs
->read_only
;
2446 info
->value
->inserted
->drv
= g_strdup(bs
->drv
->format_name
);
2447 info
->value
->inserted
->encrypted
= bs
->encrypted
;
2448 info
->value
->inserted
->encryption_key_missing
= bdrv_key_required(bs
);
2449 if (bs
->backing_file
[0]) {
2450 info
->value
->inserted
->has_backing_file
= true;
2451 info
->value
->inserted
->backing_file
= g_strdup(bs
->backing_file
);
2454 info
->value
->inserted
->backing_file_depth
=
2455 bdrv_get_backing_file_depth(bs
);
2457 if (bs
->io_limits_enabled
) {
2458 info
->value
->inserted
->bps
=
2459 bs
->io_limits
.bps
[BLOCK_IO_LIMIT_TOTAL
];
2460 info
->value
->inserted
->bps_rd
=
2461 bs
->io_limits
.bps
[BLOCK_IO_LIMIT_READ
];
2462 info
->value
->inserted
->bps_wr
=
2463 bs
->io_limits
.bps
[BLOCK_IO_LIMIT_WRITE
];
2464 info
->value
->inserted
->iops
=
2465 bs
->io_limits
.iops
[BLOCK_IO_LIMIT_TOTAL
];
2466 info
->value
->inserted
->iops_rd
=
2467 bs
->io_limits
.iops
[BLOCK_IO_LIMIT_READ
];
2468 info
->value
->inserted
->iops_wr
=
2469 bs
->io_limits
.iops
[BLOCK_IO_LIMIT_WRITE
];
2473 /* XXX: waiting for the qapi to support GSList */
2475 head
= cur_item
= info
;
2477 cur_item
->next
= info
;
2485 /* Consider exposing this as a full fledged QMP command */
2486 static BlockStats
*qmp_query_blockstat(const BlockDriverState
*bs
, Error
**errp
)
2490 s
= g_malloc0(sizeof(*s
));
2492 if (bs
->device_name
[0]) {
2493 s
->has_device
= true;
2494 s
->device
= g_strdup(bs
->device_name
);
2497 s
->stats
= g_malloc0(sizeof(*s
->stats
));
2498 s
->stats
->rd_bytes
= bs
->nr_bytes
[BDRV_ACCT_READ
];
2499 s
->stats
->wr_bytes
= bs
->nr_bytes
[BDRV_ACCT_WRITE
];
2500 s
->stats
->rd_operations
= bs
->nr_ops
[BDRV_ACCT_READ
];
2501 s
->stats
->wr_operations
= bs
->nr_ops
[BDRV_ACCT_WRITE
];
2502 s
->stats
->wr_highest_offset
= bs
->wr_highest_sector
* BDRV_SECTOR_SIZE
;
2503 s
->stats
->flush_operations
= bs
->nr_ops
[BDRV_ACCT_FLUSH
];
2504 s
->stats
->wr_total_time_ns
= bs
->total_time_ns
[BDRV_ACCT_WRITE
];
2505 s
->stats
->rd_total_time_ns
= bs
->total_time_ns
[BDRV_ACCT_READ
];
2506 s
->stats
->flush_total_time_ns
= bs
->total_time_ns
[BDRV_ACCT_FLUSH
];
2509 s
->has_parent
= true;
2510 s
->parent
= qmp_query_blockstat(bs
->file
, NULL
);
2516 BlockStatsList
*qmp_query_blockstats(Error
**errp
)
2518 BlockStatsList
*head
= NULL
, *cur_item
= NULL
;
2519 BlockDriverState
*bs
;
2521 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2522 BlockStatsList
*info
= g_malloc0(sizeof(*info
));
2523 info
->value
= qmp_query_blockstat(bs
, NULL
);
2525 /* XXX: waiting for the qapi to support GSList */
2527 head
= cur_item
= info
;
2529 cur_item
->next
= info
;
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    if (bs->dirty_bitmap) {
        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
    }

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_save_vmstate)
        return drv->bdrv_save_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_save_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}

void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    BlockDriver *drv = bs->drv;

    if (!drv || !drv->bdrv_debug_event) {
        return;
    }

    drv->bdrv_debug_event(bs, event);
}
/**************************************************************/
/* handling of snapshots */

int bdrv_can_snapshot(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    if (!drv->bdrv_snapshot_create) {
        if (bs->file != NULL) {
            return bdrv_can_snapshot(bs->file);
        }
        return 0;
    }

    return 1;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}

BlockDriverState *bdrv_snapshots(void)
{
    BlockDriverState *bs;

    if (bs_snapshots) {
        return bs_snapshots;
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        if (bdrv_can_snapshot(bs)) {
            bs_snapshots = bs;
            return bs;
        }
    }
    return NULL;
}

int bdrv_snapshot_create(BlockDriverState *bs,
                         QEMUSnapshotInfo *sn_info)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_create)
        return drv->bdrv_snapshot_create(bs, sn_info);
    if (bs->file)
        return bdrv_snapshot_create(bs->file, sn_info);
    return -ENOTSUP;
}

int bdrv_snapshot_goto(BlockDriverState *bs,
                       const char *snapshot_id)
{
    BlockDriver *drv = bs->drv;
    int ret, open_ret;

    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_goto)
        return drv->bdrv_snapshot_goto(bs, snapshot_id);

    if (bs->file) {
        drv->bdrv_close(bs);
        ret = bdrv_snapshot_goto(bs->file, snapshot_id);
        open_ret = drv->bdrv_open(bs, bs->open_flags);
        if (open_ret < 0) {
            bdrv_delete(bs->file);
            bs->drv = NULL;
            return open_ret;
        }
        return ret;
    }

    return -ENOTSUP;
}

int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_delete)
        return drv->bdrv_snapshot_delete(bs, snapshot_id);
    if (bs->file)
        return bdrv_snapshot_delete(bs->file, snapshot_id);
    return -ENOTSUP;
}

int bdrv_snapshot_list(BlockDriverState *bs,
                       QEMUSnapshotInfo **psn_info)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_list)
        return drv->bdrv_snapshot_list(bs, psn_info);
    if (bs->file)
        return bdrv_snapshot_list(bs->file, psn_info);
    return -ENOTSUP;
}
int bdrv_snapshot_load_tmp(BlockDriverState *bs,
                           const char *snapshot_name)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!bs->read_only) {
        return -EINVAL;
    }

    if (drv->bdrv_snapshot_load_tmp) {
        return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
    }
    return -ENOTSUP;
}

BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file)
{
    if (!bs->drv) {
        return NULL;
    }

    if (bs->backing_hd) {
        if (strcmp(bs->backing_file, backing_file) == 0) {
            return bs->backing_hd;
        } else {
            return bdrv_find_backing_image(bs->backing_hd, backing_file);
        }
    }

    return NULL;
}

int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}
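
/* Example (illustrative, not tied to any particular image): for a chain
 * base.qcow2 <- mid.qcow2 <- top.qcow2, bdrv_get_backing_file_depth()
 * returns 2 for top, 1 for mid and 0 for base, i.e. the number of backing
 * images below the given BlockDriverState.
 */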
#define NB_SUFFIXES 4

char *get_human_readable_size(char *buf, int buf_size, int64_t size)
{
    static const char suffixes[NB_SUFFIXES] = "KMGT";
    int64_t base;
    int i;

    if (size <= 999) {
        snprintf(buf, buf_size, "%" PRId64, size);
    } else {
        base = 1024;
        for (i = 0; i < NB_SUFFIXES; i++) {
            if (size < (10 * base)) {
                snprintf(buf, buf_size, "%0.1f%c",
                         (double)size / base,
                         suffixes[i]);
                break;
            } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
                snprintf(buf, buf_size, "%" PRId64 "%c",
                         ((size + (base >> 1)) / base),
                         suffixes[i]);
                break;
            }
            base = base * 1024;
        }
    }
    return buf;
}
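
/* Example values (worked by hand, following the rounding above):
 *        999 -> "999"    (printed as-is, no suffix)
 *       1536 -> "1.5K"   (1536 / 1024 = 1.5, below the 10*base cutoff)
 *    1500000 -> "1.4M"   (1500000 / 1048576 ~= 1.43, one decimal place)
 */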
char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
{
    char buf1[128], date_buf[128], clock_buf[128];
#ifdef _WIN32
    struct tm *ptm;
#else
    struct tm tm;
#endif
    time_t ti;
    int64_t secs;

    if (!sn) {
        snprintf(buf, buf_size,
                 "%-10s%-20s%7s%20s%15s",
                 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
    } else {
        ti = sn->date_sec;
#ifdef _WIN32
        ptm = localtime(&ti);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", ptm);
#else
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
#endif
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        snprintf(buf, buf_size,
                 "%-10s%-20s%7s%20s%15s",
                 sn->id_str, sn->name,
                 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
                 date_buf,
                 clock_buf);
    }
    return buf;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We should not need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error it returns -1, and any of the requests may or may
 * not have been submitted yet. In particular, this means that the callback
 * will be called for some of the requests, and for others it won't. The
 * caller must check the error field of the BlockRequest to find out which
 * callbacks to wait for (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
                        reqs[i].nb_sectors, multiwrite_cb, mcb);
    }

    return 0;
}
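
/* Usage sketch (hypothetical caller, names not from this file): a device
 * model that has already built one QEMUIOVector per guest write can batch
 * them roughly like this:
 *
 *     BlockRequest reqs[2];
 *
 *     reqs[0].sector = 0;  reqs[0].nb_sectors = 8;  reqs[0].qiov = qiov0;
 *     reqs[0].cb = write_done;  reqs[0].opaque = req0;
 *     reqs[1].sector = 8;  reqs[1].nb_sectors = 8;  reqs[1].qiov = qiov1;
 *     reqs[1].cb = write_done;  reqs[1].opaque = req1;
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // only requests with reqs[i].error == 0 will still get a callback
 *     }
 *
 * qiov0/qiov1, write_done and req0/req1 are placeholders for the caller's data.
 */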
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->pool->cancel(acb);
}
/* block I/O throttling */
static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
                 bool is_write, double elapsed_time, uint64_t *wait)
{
    uint64_t bps_limit = 0;
    double   bytes_limit, bytes_base, bytes_res;
    double   slice_time, wait_time;

    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
        bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
    } else if (bs->io_limits.bps[is_write]) {
        bps_limit = bs->io_limits.bps[is_write];
    } else {
        if (wait) {
            *wait = 0;
        }
        return false;
    }

    slice_time = bs->slice_end - bs->slice_start;
    slice_time /= (NANOSECONDS_PER_SECOND);
    bytes_limit = bps_limit * slice_time;
    bytes_base  = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
        bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
    }

    /* bytes_base: the bytes of data which have been read/written; it is
     *             obtained from the history statistics.
     * bytes_res:  the remaining bytes of data which need to be read/written.
     * (bytes_base + bytes_res) / bps_limit: the total time needed to
     *             complete reading/writing all of the data.
     */
    bytes_res   = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;

    if (bytes_base + bytes_res <= bytes_limit) {
        if (wait) {
            *wait = 0;
        }
        return false;
    }

    /* Calc approx time to dispatch */
    wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;

    /* When the I/O rate at runtime exceeds the limits,
     * bs->slice_end needs to be extended so that the current statistics
     * are kept until the timer fires; the factor is increased and tuned
     * based on experimental results.
     */
    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
    if (wait) {
        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
    }

    return true;
}
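
/* Worked example (made-up numbers): with bps_limit = 1000000 bytes/s and a
 * slice of 0.1 s, bytes_limit is 100000.  If 80000 bytes have already been
 * accounted in this slice and a new 65536-byte request arrives, then
 * bytes_base + bytes_res = 145536 > 100000, so the request is throttled and
 * wait_time = 145536 / 1000000 - 0.1 ~= 0.0455 s before it may be dispatched.
 */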
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
                             double elapsed_time, uint64_t *wait)
{
    uint64_t iops_limit = 0;
    double   ios_limit, ios_base;
    double   slice_time, wait_time;

    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
        iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
    } else if (bs->io_limits.iops[is_write]) {
        iops_limit = bs->io_limits.iops[is_write];
    } else {
        if (wait) {
            *wait = 0;
        }
        return false;
    }

    slice_time = bs->slice_end - bs->slice_start;
    slice_time /= (NANOSECONDS_PER_SECOND);
    ios_limit  = iops_limit * slice_time;
    ios_base   = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
        ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
    }

    if (ios_base + 1 <= ios_limit) {
        if (wait) {
            *wait = 0;
        }
        return false;
    }

    /* Calc approx time to dispatch */
    wait_time = (ios_base + 1) / iops_limit;
    if (wait_time > elapsed_time) {
        wait_time = wait_time - elapsed_time;
    } else {
        wait_time = 0;
    }

    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
    if (wait) {
        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
    }

    return true;
}
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
                           bool is_write, int64_t *wait)
{
    int64_t  now, max_wait;
    uint64_t bps_wait = 0, iops_wait = 0;
    double   elapsed_time;
    int      bps_ret, iops_ret;

    now = qemu_get_clock_ns(vm_clock);
    if ((bs->slice_start < now)
        && (bs->slice_end > now)) {
        bs->slice_end = now + bs->slice_time;
    } else {
        bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
        bs->slice_start = now;
        bs->slice_end   = now + bs->slice_time;

        bs->io_base.bytes[is_write]  = bs->nr_bytes[is_write];
        bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];

        bs->io_base.ios[is_write]    = bs->nr_ops[is_write];
        bs->io_base.ios[!is_write]   = bs->nr_ops[!is_write];
    }

    elapsed_time  = now - bs->slice_start;
    elapsed_time  /= (NANOSECONDS_PER_SECOND);

    bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
                                      is_write, elapsed_time, &bps_wait);
    iops_ret = bdrv_exceed_iops_limits(bs, is_write,
                                      elapsed_time, &iops_wait);
    if (bps_ret || iops_ret) {
        max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
        if (wait) {
            *wait = max_wait;
        }

        now = qemu_get_clock_ns(vm_clock);
        if (bs->slice_end < now + max_wait) {
            bs->slice_end = now + max_wait;
        }

        return true;
    }

    if (wait) {
        *wait = 0;
    }

    return false;
}
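
/* How the pieces fit together: bdrv_exceed_io_limits() maintains a rolling
 * accounting slice (slice_start..slice_end).  Inside a slice it compares the
 * bytes and ops accumulated since the slice began (relative to the io_base
 * snapshot) against what the configured bps/iops limits allow for the elapsed
 * slice time; whichever of the two checks demands the longer delay determines
 * *wait, and the caller queues the request for roughly that long.
 */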
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static AIOPool bdrv_em_aio_pool = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}
static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)
{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    qemu_aio_flush();
}

static AIOPool bdrv_em_co_aio_pool = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);
    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}
void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    if (pool->free_aiocb) {
        acb = pool->free_aiocb;
        pool->free_aiocb = acb->next;
    } else {
        acb = g_malloc0(pool->aiocb_size);
        acb->pool = pool;
    }
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
    AIOPool *pool = acb->pool;
    acb->next = pool->free_aiocb;
    pool->free_aiocb = acb;
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}
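
/* The pattern above is the bridge between the coroutine and callback worlds:
 * the coroutine issues the driver's AIO call with bdrv_co_io_em_complete() as
 * the completion callback and then yields.  When the AIO request finishes,
 * the callback stores the return value in the on-stack CoroutineIOCompletion
 * and re-enters the coroutine, which resumes right after
 * qemu_coroutine_yield() and simply returns co.ret to its caller.
 */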
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        return 0;
    }

    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
    return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
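
/* This is the usual synchronous wrapper shape for the coroutine-based I/O
 * paths in this file: if the caller is already a coroutine the entry point is
 * called directly, otherwise a coroutine is spawned and the caller spins in
 * qemu_aio_wait() until the coroutine overwrites rwco.ret (initialised to
 * NOT_DONE) with the real result.  bdrv_discard() below follows the same
 * pattern.
 */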
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    } else if (bs->drv->bdrv_co_discard) {
        return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
    } else if (bs->drv->bdrv_aio_discard) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                        bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    } else {
        return 0;
    }
}
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return 0;
    if (!drv->bdrv_is_inserted)
        return 1;
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know. Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}

void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
{
    bs->buffer_alignment = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
}
void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
{
    int64_t bitmap_size;

    bs->dirty_count = 0;
    if (enable) {
        if (!bs->dirty_bitmap) {
            bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
                    BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG - 1;
            bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG;

            bs->dirty_bitmap = g_new0(unsigned long, bitmap_size);
        }
    } else {
        if (bs->dirty_bitmap) {
            g_free(bs->dirty_bitmap);
            bs->dirty_bitmap = NULL;
        }
    }
}

int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (bs->dirty_bitmap &&
        (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
        return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}
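
/* Granularity note (sketch, exact constants live in the headers): the dirty
 * bitmap tracks whole chunks of BDRV_SECTORS_PER_DIRTY_CHUNK sectors rather
 * than single sectors, and each unsigned long in the bitmap covers
 * sizeof(unsigned long) * 8 such chunks.  Marking any sector within a chunk
 * dirty therefore makes bdrv_get_dirty() return 1 for every sector of that
 * chunk.
 */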
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
{
    set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs)
{
    return bs->dirty_count;
}

void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
            bs->on_write_error == BLOCK_ERR_STOP_ANY    ||
            bs->on_read_error == BLOCK_ERR_STOP_ANY));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    }
}
/* XXX: Today this is set by device models because it makes the implementation
   quite simple. However, the block layer knows about the error, so it's
   possible to implement this without device models being involved */
void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    if (bdrv_iostatus_is_enabled(bs) &&
        bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        assert(error >= 0);
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
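
/* Usage sketch (hypothetical caller): a device model wraps each request in an
 * accounting cookie so the numbers reported by query-blockstats stay accurate:
 *
 *     BlockAcctCookie cookie;
 *
 *     bdrv_acct_start(bs, &cookie, 4096, BDRV_ACCT_READ);
 *     // ... issue the 4096-byte read and wait for it to complete ...
 *     bdrv_acct_done(bs, &cookie);
 */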
int bdrv_img_create(const char *filename, const char *fmt,
                    const char *base_filename, const char *base_fmt,
                    char *options, uint64_t img_size, int flags)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_report("Unknown file format '%s'", fmt);
        ret = -EINVAL;
        goto out;
    }

    proto_drv = bdrv_find_protocol(filename);
    if (!proto_drv) {
        error_report("Unknown protocol '%s'", filename);
        ret = -EINVAL;
        goto out;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_report("Invalid options for file format '%s'.", fmt);
            ret = -EINVAL;
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_report("Backing file not supported for file format '%s'",
                         fmt);
            ret = -EINVAL;
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_report("Backing file format not supported for file "
                         "format '%s'", fmt);
            ret = -EINVAL;
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_report("Error: Trying to create an image with the "
                         "same filename as the backing file");
            ret = -EINVAL;
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_report("Unknown backing file format '%s'",
                         backing_fmt->value.s);
            ret = -EINVAL;
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
            if (ret < 0) {
                error_report("Could not open '%s'", backing_file->value.s);
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_report("Image creation needs a size parameter");
            ret = -EINVAL;
            goto out;
        }
    }

    printf("Formatting '%s', fmt=%s ", filename, fmt);
    print_option_parameters(param);
    puts("");

    ret = bdrv_create(drv, filename, param);

    if (ret < 0) {
        if (ret == -ENOTSUP) {
            error_report("Formatting or formatting option not supported for "
                         "file format '%s'", fmt);
        } else if (ret == -EFBIG) {
            error_report("The image size is too large for file format '%s'",
                         fmt);
        } else {
            error_report("%s: error while creating %s: %s", filename, fmt,
                         strerror(-ret));
        }
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_delete(bs);
    }

    return ret;
}
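
/* Usage sketch (hypothetical values): an image-creation caller such as the
 * qemu-img "create" path would end up here with roughly these arguments for
 * a 10 MiB qcow2 image with default options:
 *
 *     bdrv_img_create("test.qcow2", "qcow2",
 *                     NULL, NULL,        // no explicit backing file/format
 *                     NULL,              // no -o option string
 *                     10 * 1024 * 1024,
 *                     0);
 *
 * The file name, size and flags here are made up for illustration.
 */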
void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
                       int64_t speed, BlockDriverCompletionFunc *cb,
                       void *opaque, Error **errp)
{
    BlockJob *job;

    if (bs->job || bdrv_in_use(bs)) {
        error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }
    bdrv_set_in_use(bs, 1);

    job = g_malloc0(job_type->instance_size);
    job->job_type      = job_type;
    job->bs            = bs;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = true;
    bs->job = job;

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (error_is_set(&local_err)) {
            bs->job = NULL;
            g_free(job);
            bdrv_set_in_use(bs, 0);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}
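
/* Lifecycle summary: block_job_create() marks the device in-use and attaches
 * the new job to bs->job; the job's coroutine then runs, optionally pacing
 * itself with block_job_sleep_ns() and honouring block_job_is_cancelled();
 * when it finishes (or is cancelled) it calls block_job_complete(), which
 * invokes the user callback, frees the job and releases the in-use flag.
 */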
void block_job_complete(BlockJob *job, int ret)
{
    BlockDriverState *bs = job->bs;

    assert(bs->job == job);
    job->cb(job->opaque, ret);
    bs->job = NULL;
    g_free(job);
    bdrv_set_in_use(bs, 0);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->job_type->set_speed) {
        error_set(errp, QERR_NOT_SUPPORTED);
        return;
    }
    job->job_type->set_speed(job, speed, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}

void block_job_cancel(BlockJob *job)
{
    job->cancelled = true;
    if (job->co && !job->busy) {
        qemu_coroutine_enter(job->co, NULL);
    }
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}
struct BlockCancelData {
    BlockJob *job;
    BlockDriverCompletionFunc *cb;
    void *opaque;
    bool cancelled;
    int ret;
};

static void block_job_cancel_cb(void *opaque, int ret)
{
    struct BlockCancelData *data = opaque;

    data->cancelled = block_job_is_cancelled(data->job);
    data->ret = ret;
    data->cb(data->opaque, ret);
}

int block_job_cancel_sync(BlockJob *job)
{
    struct BlockCancelData data;
    BlockDriverState *bs = job->bs;

    assert(bs->job == job);

    /* Set up our own callback to store the result and chain to
     * the original callback.
     */
    data.job = job;
    data.cb = job->cb;
    data.opaque = job->opaque;
    data.ret = -EINPROGRESS;
    job->cb = block_job_cancel_cb;
    job->opaque = &data;
    block_job_cancel(job);
    while (data.ret == -EINPROGRESS) {
        qemu_aio_wait();
    }
    return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
}
void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
{
    /* Check cancellation *before* setting busy = false, too! */
    if (!block_job_is_cancelled(job)) {
        job->busy = false;
        co_sleep_ns(clock, ns);
        job->busy = true;
    }
}