/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
/* This function makes an I/O wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already queued,
     * queue this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
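
/*
 * Illustrative sketch (not part of the original file): a request path
 * calls the intercept helper before doing I/O, e.g. in
 * bdrv_co_do_preadv():
 *
 *     if (bs->io_limits_enabled) {
 *         bdrv_io_limits_intercept(bs, bytes, false);
 *     }
 *
 * The coroutine blocks in qemu_co_queue_wait() until a throttle timer
 * fires and re-enters it via qemu_co_enter_next().
 */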
size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
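
/*
 * Illustrative examples (not part of the original file), assuming a
 * POSIX build:
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "snap.qcow2");
 *         -> "/images/snap.qcow2"
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "/abs.qcow2");
 *         -> "/abs.qcow2" (absolute filenames are copied verbatim)
 */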
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;
    int i;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
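
/*
 * Illustrative sketch (not part of the original file): creating an
 * image through this helper, e.g. a 1 GiB qcow2 file:
 *
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1 * 1024 * 1024 * 1024);
 *     ret = bdrv_create(drv, "/tmp/test.qcow2", opts, &err);
 *     free_option_parameters(opts);
 *
 * This mirrors what bdrv_append_temp_snapshot() below does for its
 * temporary overlay.
 */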
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
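
/*
 * Illustrative usage (not part of the original file):
 *
 *     char tmp[PATH_MAX];
 *     if (get_tmp_filename(tmp, sizeof(tmp)) == 0) {
 *         ...   // tmp now names an empty file, e.g. "/var/tmp/vl.Ab12Cd"
 *         unlink(tmp);    // the caller is responsible for removing it
 *     }
 */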
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
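
/*
 * Summary of the mapping implemented above (added for clarity; derived
 * directly from bdrv_parse_cache_flags()):
 *
 *     mode          BDRV_O_NOCACHE   BDRV_O_CACHE_WB   BDRV_O_NO_FLUSH
 *     writethrough        -                 -                 -
 *     writeback           -                 x                 -
 *     none/off            x                 x                 -
 *     directsync          x                 -                 -
 *     unsafe              -                 x                 x
 */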
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is an indirect pointer to a QDict of options to pass to the block
 * drivers, or pointer to NULL for an empty set of options. If this function
 * takes ownership of the QDict reference, it will set *options to NULL;
 * otherwise, it will contain unused/unrecognized options after this function
 * returns. Then, the caller is responsible for freeing it. If it intends to
 * reuse the QDict, QINCREF() should be called beforehand.
 */
static int bdrv_file_open(BlockDriverState *bs, const char *filename,
                          QDict **options, int flags, Error **errp)
{
    BlockDriver *drv;
    const char *drvname;
    bool parse_filename = false;
    Error *local_err = NULL;
    int ret;

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(*options, "filename");
    } else if (filename && !qdict_haskey(*options, "filename")) {
        qdict_put(*options, "filename", qstring_from_str(filename));
        parse_filename = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(*options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(*options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, parse_filename);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        } else {
            filename = qdict_get_str(*options, "filename");
        }
    }

    if (!drv->bdrv_file_open) {
        ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
        *options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    bs->growable = 1;
    return 0;

fail:
    return ret;
}
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{
    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bs->device_name);
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs);
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling this function.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriver *back_drv = NULL;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    backing_hd = bdrv_new("", errp);

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
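
/*
 * Illustrative sketch (not part of the original file): with a flattened
 * options QDict such as
 *
 *     { "file.driver": "file", "file.filename": "/img/disk.qcow2" }
 *
 * a caller like bdrv_open() below extracts the BlockdevRef with
 *
 *     BlockDriverState *file = NULL;
 *     ret = bdrv_open_image(&file, NULL, options, "file",
 *                           bdrv_inherited_flags(flags), true, &err);
 *
 * after which the "file.*" keys have been consumed from 'options'.
 */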
void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    BlockDriver *bdrv_qcow2;
    QEMUOptionParameter *create_options;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }
    total_size &= BDRV_SECTOR_MASK;

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    bdrv_qcow2 = bdrv_find_format("qcow2");
    create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                             NULL);

    set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);

    ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
    free_option_parameters(create_options);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new("", &error_abort);

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
}
static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new("", &error_abort);
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            ret = -EINVAL;
            goto fail;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(options, json_options, false);
        QDECREF(json_options);
        filename = NULL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    if (flags & BDRV_O_PROTOCOL) {
        assert(!drv);
        ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL,
                             &local_err);
        if (!ret) {
            drv = bs->drv;
            goto done;
        } else if (bs->drv) {
            goto close_and_fail;
        } else {
            goto fail;
        }
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }
    if (flags & BDRV_O_SNAPSHOT) {
        snapshot_flags = bdrv_temp_snapshot_flags(flags);
        flags = bdrv_backing_flags(flags);
    }

    assert(file == NULL);
    ret = bdrv_open_image(&file, filename, options, "file",
                          bdrv_inherited_flags(flags),
                          true, &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    if (!drv) {
        if (file) {
            ret = find_image_format(file, filename, &drv, &local_err);
        } else {
            error_setg(errp, "Must specify either driver or file");
            ret = -EINVAL;
            goto fail;
        }
    }

    if (!drv) {
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            goto close_and_fail;
        }
    }

done:
    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bs->device_name, entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
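
/*
 * Illustrative sketch (not part of the original file): opening an image
 * with an explicit format driver and a fresh BDS:
 *
 *     BlockDriverState *bs = NULL;
 *     Error *err = NULL;
 *     int ret = bdrv_open(&bs, "/img/disk.qcow2", NULL, NULL,
 *                         BDRV_O_RDWR | BDRV_O_CACHE_WB,
 *                         bdrv_find_format("qcow2"), &err);
 *
 * Passing NULL options is equivalent to an empty QDict; on success *bs
 * points to the (possibly newly created) BlockDriverState.
 */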
typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags. All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
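
/*
 * Illustrative sketch (not part of the original file): reopening two
 * devices in one atomic set, e.g. to change the open flags on both:
 *
 *     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs_a, flags_a);
 *     queue = bdrv_reopen_queue(queue, bs_b, flags_b);
 *     ret = bdrv_reopen_multiple(queue, &err);
 *
 * Either both devices end up reopened with the new flags, or neither
 * does; bdrv_reopen() above is the single-device convenience wrapper.
 */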
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error. On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs);
}
/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            BlockDriverState *backing_hd = bs->backing_hd;
            bdrv_set_backing_hd(bs, NULL);
            bdrv_unref(backing_hd);
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_close(bs);
        aio_context_release(aio_context);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        busy = false;

        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            AioContext *aio_context = bdrv_get_aio_context(bs);
            bool bs_busy;

            aio_context_acquire(aio_context);
            bdrv_start_throttled_reqs(bs);
            bs_busy = bdrv_requests_pending(bs);
            bs_busy |= aio_poll(aio_context, bs_busy);
            aio_context_release(aio_context);

            busy |= bs_busy;
        }
    }
}
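
/*
 * Illustrative sketch (not part of the original file): quiescing all
 * devices before a state-changing operation typically pairs draining
 * with flushing, as the comment above notes:
 *
 *     bdrv_drain_all();   // wait for in-flight requests to complete
 *     bdrv_flush_all();   // then push completed writes out to disk
 */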
/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists. Also, NUL-terminate the device_name to prevent a
 * double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}
static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}

static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;
    memcpy(bs_dest->op_blockers, bs_src->op_blockers,
           sizeof(bs_dest->op_blockers));
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* The code needs to swap the node_name but simply swapping node_list won't
     * work so first remove the nodes from the graph list, do the swap then
     * insert them back if needed.
     */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
    }

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap! */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* insert the nodes back into the graph node list if needed */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
    }

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bdrv_set_backing_hd(bs_top, bs_new);
}
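
/*
 * Illustrative note (not part of the original file): after
 * bdrv_append(overlay, bs), the device keeps pointing at 'bs', whose
 * contents are now the overlay's, and the old top becomes its backing
 * file:
 *
 *     before:  device -> bs (old top) -> backing ...
 *     after:   device -> bs (overlay) -> old top -> backing ...
 *
 * bdrv_append_temp_snapshot() above uses exactly this to slip a
 * temporary qcow2 overlay on top of the opened image.
 */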
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(bdrv_op_blocker_is_empty(bs));
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->guest_block_size = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
2137 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
2138 enum MonitorEvent ev,
2139 BlockErrorAction action, bool is_read)
2141 QObject *data;
2142 const char *action_str;
2144 switch (action) {
2145 case BDRV_ACTION_REPORT:
2146 action_str = "report";
2147 break;
2148 case BDRV_ACTION_IGNORE:
2149 action_str = "ignore";
2150 break;
2151 case BDRV_ACTION_STOP:
2152 action_str = "stop";
2153 break;
2154 default:
2155 abort();
2158 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
2159 bdrv->device_name,
2160 action_str,
2161 is_read ? "read" : "write");
2162 monitor_protocol_event(ev, data);
2164 qobject_decref(data);
2167 static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
2169 QObject *data;
2171 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
2172 bdrv_get_device_name(bs), ejected);
2173 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
2175 qobject_decref(data);
2178 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2180 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2181 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2182 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2183 if (tray_was_closed) {
2184 /* tray open */
2185 bdrv_emit_qmp_eject_event(bs, true);
2187 if (load) {
2188 /* tray close */
2189 bdrv_emit_qmp_eject_event(bs, false);
2194 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2196 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2199 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2201 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2202 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2206 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2208 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2209 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2211 return false;
2214 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2216 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2217 bs->dev_ops->resize_cb(bs->dev_opaque);
2221 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2223 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2224 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2226 return false;
2230 * Run consistency checks on an image
2232 * Returns 0 if the check could be completed (it doesn't mean that the image is
2233 * free of errors) or -errno when an internal error occurred. The results of the
2234 * check are stored in res.
2236 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2238 if (bs->drv->bdrv_check == NULL) {
2239 return -ENOTSUP;
2242 memset(res, 0, sizeof(*res));
2243 return bs->drv->bdrv_check(bs, res, fix);
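/* Example (illustrative sketch of a caller): running a read-only check and
 * summarising the result. Passing 0 as the BdrvCheckMode requests no
 * repairs; qemu-img's "check" command builds on the same call. */
static int example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult res;
    int ret = bdrv_check(bs, &res, 0);

    if (ret < 0) {
        return ret; /* -ENOTSUP, or an internal error */
    }
    if (res.corruptions || res.leaks) {
        fprintf(stderr, "check found %d corruptions and %d leaks\n",
                res.corruptions, res.leaks);
    }
    return 0;
}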
2246 #define COMMIT_BUF_SECTORS 2048
2248 /* commit COW file into the raw image */
2249 int bdrv_commit(BlockDriverState *bs)
2251 BlockDriver *drv = bs->drv;
2252 int64_t sector, total_sectors, length, backing_length;
2253 int n, ro, open_flags;
2254 int ret = 0;
2255 uint8_t *buf = NULL;
2256 char filename[PATH_MAX];
2258 if (!drv)
2259 return -ENOMEDIUM;
2261 if (!bs->backing_hd) {
2262 return -ENOTSUP;
2265 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2266 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2267 return -EBUSY;
2270 ro = bs->backing_hd->read_only;
2271 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2272 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2273 open_flags = bs->backing_hd->open_flags;
2275 if (ro) {
2276 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2277 return -EACCES;
2281 length = bdrv_getlength(bs);
2282 if (length < 0) {
2283 ret = length;
2284 goto ro_cleanup;
2287 backing_length = bdrv_getlength(bs->backing_hd);
2288 if (backing_length < 0) {
2289 ret = backing_length;
2290 goto ro_cleanup;
2293 /* If our top snapshot is larger than the backing file image,
2294 * grow the backing file image if possible. If not possible,
2295 * we must return an error */
2296 if (length > backing_length) {
2297 ret = bdrv_truncate(bs->backing_hd, length);
2298 if (ret < 0) {
2299 goto ro_cleanup;
2303 total_sectors = length >> BDRV_SECTOR_BITS;
2304 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2306 for (sector = 0; sector < total_sectors; sector += n) {
2307 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2308 if (ret < 0) {
2309 goto ro_cleanup;
2311 if (ret) {
2312 ret = bdrv_read(bs, sector, buf, n);
2313 if (ret < 0) {
2314 goto ro_cleanup;
2317 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2318 if (ret < 0) {
2319 goto ro_cleanup;
2324 if (drv->bdrv_make_empty) {
2325 ret = drv->bdrv_make_empty(bs);
2326 if (ret < 0) {
2327 goto ro_cleanup;
2329 bdrv_flush(bs);
2333 * Make sure all data we wrote to the backing device is actually
2334 * stable on disk.
2336 if (bs->backing_hd) {
2337 bdrv_flush(bs->backing_hd);
2340 ret = 0;
2341 ro_cleanup:
2342 g_free(buf);
2344 if (ro) {
2345 /* ignoring error return here */
2346 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2349 return ret;
2352 int bdrv_commit_all(void)
2354 BlockDriverState *bs;
2356 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2357 AioContext *aio_context = bdrv_get_aio_context(bs);
2359 aio_context_acquire(aio_context);
2360 if (bs->drv && bs->backing_hd) {
2361 int ret = bdrv_commit(bs);
2362 if (ret < 0) {
2363 aio_context_release(aio_context);
2364 return ret;
2367 aio_context_release(aio_context);
2369 return 0;
2373 * Remove an active request from the tracked requests list
2375 * This function should be called when a tracked request is completing.
2377 static void tracked_request_end(BdrvTrackedRequest *req)
2379 if (req->serialising) {
2380 req->bs->serialising_in_flight--;
2383 QLIST_REMOVE(req, list);
2384 qemu_co_queue_restart_all(&req->wait_queue);
2388 * Add an active request to the tracked requests list
2390 static void tracked_request_begin(BdrvTrackedRequest *req,
2391 BlockDriverState *bs,
2392 int64_t offset,
2393 unsigned int bytes, bool is_write)
2395 *req = (BdrvTrackedRequest){
2396 .bs = bs,
2397 .offset = offset,
2398 .bytes = bytes,
2399 .is_write = is_write,
2400 .co = qemu_coroutine_self(),
2401 .serialising = false,
2402 .overlap_offset = offset,
2403 .overlap_bytes = bytes,
2406 qemu_co_queue_init(&req->wait_queue);
2408 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2411 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2413 int64_t overlap_offset = req->offset & ~(align - 1);
2414 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2415 - overlap_offset;
2417 if (!req->serialising) {
2418 req->bs->serialising_in_flight++;
2419 req->serialising = true;
2422 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2423 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
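/* Example: for a request with offset = 1536 and bytes = 1024 serialised at
 * align = 4096, overlap_offset is rounded down to 0 and overlap_bytes up to
 * 4096, so the whole 4 KiB block touched by the request is protected. */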
2427 * Round a region to cluster boundaries
2429 void bdrv_round_to_clusters(BlockDriverState *bs,
2430 int64_t sector_num, int nb_sectors,
2431 int64_t *cluster_sector_num,
2432 int *cluster_nb_sectors)
2434 BlockDriverInfo bdi;
2436 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2437 *cluster_sector_num = sector_num;
2438 *cluster_nb_sectors = nb_sectors;
2439 } else {
2440 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2441 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2442 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2443 nb_sectors, c);
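/* Example: with 64 KiB clusters (c = 128 sectors), a request for sectors
 * [100, 200) is widened to [0, 256), i.e. cluster_sector_num = 0 and
 * cluster_nb_sectors = 256, covering both clusters the request touches. */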
2447 static int bdrv_get_cluster_size(BlockDriverState *bs)
2449 BlockDriverInfo bdi;
2450 int ret;
2452 ret = bdrv_get_info(bs, &bdi);
2453 if (ret < 0 || bdi.cluster_size == 0) {
2454 return bs->request_alignment;
2455 } else {
2456 return bdi.cluster_size;
2460 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2461 int64_t offset, unsigned int bytes)
2463 /* aaaa bbbb */
2464 if (offset >= req->overlap_offset + req->overlap_bytes) {
2465 return false;
2467 /* bbbb aaaa */
2468 if (req->overlap_offset >= offset + bytes) {
2469 return false;
2471 return true;
2474 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2476 BlockDriverState *bs = self->bs;
2477 BdrvTrackedRequest *req;
2478 bool retry;
2479 bool waited = false;
2481 if (!bs->serialising_in_flight) {
2482 return false;
2485 do {
2486 retry = false;
2487 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2488 if (req == self || (!req->serialising && !self->serialising)) {
2489 continue;
2491 if (tracked_request_overlaps(req, self->overlap_offset,
2492 self->overlap_bytes))
2494 /* Hitting this means there was a reentrant request, for
2495 * example, a block driver issuing nested requests. This must
2496 * never happen since it means deadlock.
2498 assert(qemu_coroutine_self() != req->co);
2500 /* If the request is already (indirectly) waiting for us, or
2501 * will wait for us as soon as it wakes up, then just go on
2502 * (instead of producing a deadlock in the former case). */
2503 if (!req->waiting_for) {
2504 self->waiting_for = req;
2505 qemu_co_queue_wait(&req->wait_queue);
2506 self->waiting_for = NULL;
2507 retry = true;
2508 waited = true;
2509 break;
2513 } while (retry);
2515 return waited;
2519 * Return values:
2520 * 0 - success
2521 * -EINVAL - backing format specified, but no file
2522 * -ENOSPC - can't update the backing file because no space is left in the
2523 * image file header
2524 * -ENOTSUP - format driver doesn't support changing the backing file
2526 int bdrv_change_backing_file(BlockDriverState *bs,
2527 const char *backing_file, const char *backing_fmt)
2529 BlockDriver *drv = bs->drv;
2530 int ret;
2532 /* Backing file format doesn't make sense without a backing file */
2533 if (backing_fmt && !backing_file) {
2534 return -EINVAL;
2537 if (drv->bdrv_change_backing_file != NULL) {
2538 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2539 } else {
2540 ret = -ENOTSUP;
2543 if (ret == 0) {
2544 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2545 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2547 return ret;
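/* Example (illustrative sketch): rebasing an image onto a new backing file
 * and mapping the documented error codes to messages. The file names here
 * are hypothetical. */
static int example_rebase(BlockDriverState *bs)
{
    int ret = bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");

    switch (ret) {
    case 0:
        return 0;
    case -ENOTSUP:
        fprintf(stderr, "format driver cannot change the backing file\n");
        break;
    case -ENOSPC:
        fprintf(stderr, "no room in the image header for the new name\n");
        break;
    default:
        fprintf(stderr, "failed to change backing file: %s\n",
                strerror(-ret));
        break;
    }
    return ret;
}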
2551 * Finds the image layer in the chain that has 'bs' as its backing file.
2553 * active is the current topmost image.
2555 * Returns NULL if bs is not found in active's image chain,
2556 * or if active == bs.
2558 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2559 BlockDriverState *bs)
2561 BlockDriverState *overlay = NULL;
2562 BlockDriverState *intermediate;
2564 assert(active != NULL);
2565 assert(bs != NULL);
2567 /* if bs is the same as active, then by definition it has no overlay
2569 if (active == bs) {
2570 return NULL;
2573 intermediate = active;
2574 while (intermediate->backing_hd) {
2575 if (intermediate->backing_hd == bs) {
2576 overlay = intermediate;
2577 break;
2579 intermediate = intermediate->backing_hd;
2582 return overlay;
2585 typedef struct BlkIntermediateStates {
2586 BlockDriverState *bs;
2587 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2588 } BlkIntermediateStates;
2592 * Drops images above 'base' up to and including 'top', and sets the image
2593 * above 'top' to have base as its backing file.
2595 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2596 * information in the overlay can be properly updated.
2598 * E.g., this will convert the following chain:
2599 * bottom <- base <- intermediate <- top <- active
2601 * to
2603 * bottom <- base <- active
2605 * It is allowed for bottom==base, in which case it converts:
2607 * base <- intermediate <- top <- active
2609 * to
2611 * base <- active
2613 * Error conditions:
2614 * if active == top, that is considered an error
2617 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2618 BlockDriverState *base)
2620 BlockDriverState *intermediate;
2621 BlockDriverState *base_bs = NULL;
2622 BlockDriverState *new_top_bs = NULL;
2623 BlkIntermediateStates *intermediate_state, *next;
2624 int ret = -EIO;
2626 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2627 QSIMPLEQ_INIT(&states_to_delete);
2629 if (!top->drv || !base->drv) {
2630 goto exit;
2633 new_top_bs = bdrv_find_overlay(active, top);
2635 if (new_top_bs == NULL) {
2636 /* we could not find the image above 'top', this is an error */
2637 goto exit;
2640 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2641 * to do, no intermediate images */
2642 if (new_top_bs->backing_hd == base) {
2643 ret = 0;
2644 goto exit;
2647 intermediate = top;
2649 /* now we will go down through the list, and add each BDS we find
2650 * into our deletion queue, until we hit the 'base'
2652 while (intermediate) {
2653 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2654 intermediate_state->bs = intermediate;
2655 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2657 if (intermediate->backing_hd == base) {
2658 base_bs = intermediate->backing_hd;
2659 break;
2661 intermediate = intermediate->backing_hd;
2663 if (base_bs == NULL) {
2664 /* Something went wrong: we did not end at the base. Safely
2665 * unravel everything, and exit with an error */
2666 goto exit;
2669 /* success - we can delete the intermediate states, and link top->base */
2670 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2671 base_bs->drv ? base_bs->drv->format_name : "");
2672 if (ret) {
2673 goto exit;
2675 bdrv_set_backing_hd(new_top_bs, base_bs);
2677 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2678 /* so that bdrv_close() does not recursively close the chain */
2679 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2680 bdrv_unref(intermediate_state->bs);
2682 ret = 0;
2684 exit:
2685 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2686 g_free(intermediate_state);
2688 return ret;
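/* Example (illustrative sketch): dropping everything between the active
 * layer's immediate backing file and the end of the chain, i.e. turning
 * 'base <- intermediate <- top <- active' into 'base <- active'. */
static int example_collapse_chain(BlockDriverState *active)
{
    BlockDriverState *top = active->backing_hd;
    BlockDriverState *base = bdrv_find_base(active);

    if (!top || top == base) {
        return 0; /* nothing to drop */
    }
    return bdrv_drop_intermediate(active, top, base);
}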
2692 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2693 size_t size)
2695 int64_t len;
2697 if (size > INT_MAX) {
2698 return -EIO;
2701 if (!bdrv_is_inserted(bs))
2702 return -ENOMEDIUM;
2704 if (bs->growable)
2705 return 0;
2707 len = bdrv_getlength(bs);
2709 if (offset < 0)
2710 return -EIO;
2712 if ((offset > len) || (len - offset < size))
2713 return -EIO;
2715 return 0;
2718 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2719 int nb_sectors)
2721 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2722 return -EIO;
2725 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2726 nb_sectors * BDRV_SECTOR_SIZE);
2729 typedef struct RwCo {
2730 BlockDriverState *bs;
2731 int64_t offset;
2732 QEMUIOVector *qiov;
2733 bool is_write;
2734 int ret;
2735 BdrvRequestFlags flags;
2736 } RwCo;
2738 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2740 RwCo *rwco = opaque;
2742 if (!rwco->is_write) {
2743 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2744 rwco->qiov->size, rwco->qiov,
2745 rwco->flags);
2746 } else {
2747 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2748 rwco->qiov->size, rwco->qiov,
2749 rwco->flags);
2754 * Process a vectored synchronous request using coroutines
2756 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2757 QEMUIOVector *qiov, bool is_write,
2758 BdrvRequestFlags flags)
2760 Coroutine *co;
2761 RwCo rwco = {
2762 .bs = bs,
2763 .offset = offset,
2764 .qiov = qiov,
2765 .is_write = is_write,
2766 .ret = NOT_DONE,
2767 .flags = flags,
2771 * In a synchronous call context, while the vCPU is blocked, the throttling
2772 * timer will not fire, so I/O throttling has to be disabled here if it has
2773 * been enabled.
2775 if (bs->io_limits_enabled) {
2776 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2777 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2778 bdrv_io_limits_disable(bs);
2781 if (qemu_in_coroutine()) {
2782 /* Fast-path if already in coroutine context */
2783 bdrv_rw_co_entry(&rwco);
2784 } else {
2785 AioContext *aio_context = bdrv_get_aio_context(bs);
2787 co = qemu_coroutine_create(bdrv_rw_co_entry);
2788 qemu_coroutine_enter(co, &rwco);
2789 while (rwco.ret == NOT_DONE) {
2790 aio_poll(aio_context, true);
2793 return rwco.ret;
2797 * Process a synchronous request using coroutines
2799 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2800 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2802 QEMUIOVector qiov;
2803 struct iovec iov = {
2804 .iov_base = (void *)buf,
2805 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2808 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2809 return -EINVAL;
2812 qemu_iovec_init_external(&qiov, &iov, 1);
2813 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2814 &qiov, is_write, flags);
2817 /* return < 0 if error. See bdrv_write() for the return codes */
2818 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2819 uint8_t *buf, int nb_sectors)
2821 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2824 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2825 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2826 uint8_t *buf, int nb_sectors)
2828 bool enabled;
2829 int ret;
2831 enabled = bs->io_limits_enabled;
2832 bs->io_limits_enabled = false;
2833 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2834 bs->io_limits_enabled = enabled;
2835 return ret;
2838 /* Return < 0 if error. Important errors are:
2839 -EIO generic I/O error (may happen for all errors)
2840 -ENOMEDIUM No media inserted.
2841 -EINVAL Invalid sector number or nb_sectors
2842 -EACCES Trying to write a read-only device
2844 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2845 const uint8_t *buf, int nb_sectors)
2847 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2850 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2851 int nb_sectors, BdrvRequestFlags flags)
2853 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2854 BDRV_REQ_ZERO_WRITE | flags);
2858 * Completely zero out a block device with the help of bdrv_write_zeroes.
2859 * The operation is sped up by checking the block status and only writing
2860 * zeroes to regions that do not already read back as zeroes. Optional
2861 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2863 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2865 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2867 int64_t target_size;
2868 int64_t ret, nb_sectors, sector_num = 0;
2869 int n;
2871 target_size = bdrv_getlength(bs);
2872 if (target_size < 0) {
2873 return target_size;
2875 target_size /= BDRV_SECTOR_SIZE;
2877 for (;;) {
2878 nb_sectors = target_size - sector_num;
2879 if (nb_sectors <= 0) {
2880 return 0;
2882 if (nb_sectors > INT_MAX) {
2883 nb_sectors = INT_MAX;
2885 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2886 if (ret < 0) {
2887 error_report("error getting block status at sector %" PRId64 ": %s",
2888 sector_num, strerror(-ret));
2889 return ret;
2891 if (ret & BDRV_BLOCK_ZERO) {
2892 sector_num += n;
2893 continue;
2895 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2896 if (ret < 0) {
2897 error_report("error writing zeroes at sector %" PRId64 ": %s",
2898 sector_num, strerror(-ret));
2899 return ret;
2901 sector_num += n;
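/* Example (illustrative sketch): clearing a whole device before reuse.
 * BDRV_REQ_MAY_UNMAP lets drivers unmap ranges instead of writing literal
 * zeroes where the backend supports it. */
static int example_wipe_device(BlockDriverState *bs)
{
    return bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
}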
2905 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2907 QEMUIOVector qiov;
2908 struct iovec iov = {
2909 .iov_base = (void *)buf,
2910 .iov_len = bytes,
2912 int ret;
2914 if (bytes < 0) {
2915 return -EINVAL;
2918 qemu_iovec_init_external(&qiov, &iov, 1);
2919 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2920 if (ret < 0) {
2921 return ret;
2924 return bytes;
2927 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2929 int ret;
2931 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2932 if (ret < 0) {
2933 return ret;
2936 return qiov->size;
2939 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2940 const void *buf, int bytes)
2942 QEMUIOVector qiov;
2943 struct iovec iov = {
2944 .iov_base = (void *) buf,
2945 .iov_len = bytes,
2948 if (bytes < 0) {
2949 return -EINVAL;
2952 qemu_iovec_init_external(&qiov, &iov, 1);
2953 return bdrv_pwritev(bs, offset, &qiov);
2957 * Writes to the file and ensures that no writes are reordered across this
2958 * request (acts as a barrier)
2960 * Returns 0 on success, -errno in error cases.
2962 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2963 const void *buf, int count)
2965 int ret;
2967 ret = bdrv_pwrite(bs, offset, buf, count);
2968 if (ret < 0) {
2969 return ret;
2972 /* No flush needed for cache modes that already do it */
2973 if (bs->enable_write_cache) {
2974 bdrv_flush(bs);
2977 return 0;
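/* Example (illustrative sketch): persisting a small metadata field with
 * barrier semantics. The byte offset and the field itself are hypothetical;
 * the point is that the data is stable on disk when the call returns. */
static int example_update_header_field(BlockDriverState *bs, uint32_t value)
{
    uint32_t be_value = cpu_to_be32(value);

    return bdrv_pwrite_sync(bs, 8, &be_value, sizeof(be_value));
}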
2980 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2981 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2983 /* Perform I/O through a temporary buffer so that users who scribble over
2984 * their read buffer while the operation is in progress do not end up
2985 * modifying the image file. This is critical for zero-copy guest I/O
2986 * where anything might happen inside guest memory.
2988 void *bounce_buffer;
2990 BlockDriver *drv = bs->drv;
2991 struct iovec iov;
2992 QEMUIOVector bounce_qiov;
2993 int64_t cluster_sector_num;
2994 int cluster_nb_sectors;
2995 size_t skip_bytes;
2996 int ret;
2998 /* Cover the entire cluster so no additional backing file I/O is required
2999 * when allocating the cluster in the image file.
3001 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
3002 &cluster_sector_num, &cluster_nb_sectors);
3004 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
3005 cluster_sector_num, cluster_nb_sectors);
3007 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
3008 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
3009 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
3011 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
3012 &bounce_qiov);
3013 if (ret < 0) {
3014 goto err;
3017 if (drv->bdrv_co_write_zeroes &&
3018 buffer_is_zero(bounce_buffer, iov.iov_len)) {
3019 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
3020 cluster_nb_sectors, 0);
3021 } else {
3022 /* This does not change the data on the disk, it is not necessary
3023 * to flush even in cache=writethrough mode.
3025 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
3026 &bounce_qiov);
3029 if (ret < 0) {
3030 /* It might be okay to ignore write errors for guest requests. If this
3031 * is a deliberate copy-on-read then we don't want to ignore the error.
3032 * Simply report it in all cases.
3034 goto err;
3037 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
3038 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3039 nb_sectors * BDRV_SECTOR_SIZE);
3041 err:
3042 qemu_vfree(bounce_buffer);
3043 return ret;
3047 * Forwards an already correctly aligned request to the BlockDriver. This
3048 * handles copy on read and zeroing after EOF; any other features must be
3049 * implemented by the caller.
3051 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3052 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3053 int64_t align, QEMUIOVector *qiov, int flags)
3055 BlockDriver *drv = bs->drv;
3056 int ret;
3058 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3059 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3061 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3062 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3064 /* Handle Copy on Read and associated serialisation */
3065 if (flags & BDRV_REQ_COPY_ON_READ) {
3066 /* If we touch the same cluster it counts as an overlap. This
3067 * guarantees that allocating writes will be serialized and not race
3068 * with each other for the same cluster. For example, in copy-on-read
3069 * it ensures that the CoR read and write operations are atomic and
3070 * guest writes cannot interleave between them. */
3071 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3074 wait_serialising_requests(req);
3076 if (flags & BDRV_REQ_COPY_ON_READ) {
3077 int pnum;
3079 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3080 if (ret < 0) {
3081 goto out;
3084 if (!ret || pnum != nb_sectors) {
3085 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3086 goto out;
3090 /* Forward the request to the BlockDriver */
3091 if (!(bs->zero_beyond_eof && bs->growable)) {
3092 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3093 } else {
3094 /* Read zeroes after EOF of growable BDSes */
3095 int64_t len, total_sectors, max_nb_sectors;
3097 len = bdrv_getlength(bs);
3098 if (len < 0) {
3099 ret = len;
3100 goto out;
3103 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
3104 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3105 align >> BDRV_SECTOR_BITS);
3106 if (max_nb_sectors > 0) {
3107 ret = drv->bdrv_co_readv(bs, sector_num,
3108 MIN(nb_sectors, max_nb_sectors), qiov);
3109 } else {
3110 ret = 0;
3113 /* Reading beyond end of file is supposed to produce zeroes */
3114 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3115 uint64_t offset = MAX(0, total_sectors - sector_num);
3116 uint64_t bytes = (sector_num + nb_sectors - offset) *
3117 BDRV_SECTOR_SIZE;
3118 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3122 out:
3123 return ret;
3127 * Handle a read request in coroutine context
3129 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3130 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3131 BdrvRequestFlags flags)
3133 BlockDriver *drv = bs->drv;
3134 BdrvTrackedRequest req;
3136 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3137 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3138 uint8_t *head_buf = NULL;
3139 uint8_t *tail_buf = NULL;
3140 QEMUIOVector local_qiov;
3141 bool use_local_qiov = false;
3142 int ret;
3144 if (!drv) {
3145 return -ENOMEDIUM;
3147 if (bdrv_check_byte_request(bs, offset, bytes)) {
3148 return -EIO;
3151 if (bs->copy_on_read) {
3152 flags |= BDRV_REQ_COPY_ON_READ;
3155 /* throttling disk I/O */
3156 if (bs->io_limits_enabled) {
3157 bdrv_io_limits_intercept(bs, bytes, false);
3160 /* Align read if necessary by padding qiov */
3161 if (offset & (align - 1)) {
3162 head_buf = qemu_blockalign(bs, align);
3163 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3164 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3165 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3166 use_local_qiov = true;
3168 bytes += offset & (align - 1);
3169 offset = offset & ~(align - 1);
3172 if ((offset + bytes) & (align - 1)) {
3173 if (!use_local_qiov) {
3174 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3175 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3176 use_local_qiov = true;
3178 tail_buf = qemu_blockalign(bs, align);
3179 qemu_iovec_add(&local_qiov, tail_buf,
3180 align - ((offset + bytes) & (align - 1)));
3182 bytes = ROUND_UP(bytes, align);
3185 tracked_request_begin(&req, bs, offset, bytes, false);
3186 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3187 use_local_qiov ? &local_qiov : qiov,
3188 flags);
3189 tracked_request_end(&req);
3191 if (use_local_qiov) {
3192 qemu_iovec_destroy(&local_qiov);
3193 qemu_vfree(head_buf);
3194 qemu_vfree(tail_buf);
3197 return ret;
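/* Example: with request_alignment = 512, a 100-byte read at offset 1000
 * becomes the aligned request [512, 1536): a 488-byte head buffer and a
 * 436-byte tail buffer are added around the caller's qiov, and only the
 * middle 100 bytes end up in the caller's memory. */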
3200 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3201 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3202 BdrvRequestFlags flags)
3204 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3205 return -EINVAL;
3208 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3209 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3212 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3213 int nb_sectors, QEMUIOVector *qiov)
3215 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3217 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3220 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3221 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3223 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3225 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3226 BDRV_REQ_COPY_ON_READ);
3229 /* If no limit is specified in the BlockLimits, use a default
3230 * of 32768 512-byte sectors (16 MiB) per request.
3232 #define MAX_WRITE_ZEROES_DEFAULT 32768
3234 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3235 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3237 BlockDriver *drv = bs->drv;
3238 QEMUIOVector qiov;
3239 struct iovec iov = {0};
3240 int ret = 0;
3242 int max_write_zeroes = bs->bl.max_write_zeroes ?
3243 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3245 while (nb_sectors > 0 && !ret) {
3246 int num = nb_sectors;
3248 /* Align request. Block drivers can expect the "bulk" of the request
3249 * to be aligned.
3251 if (bs->bl.write_zeroes_alignment
3252 && num > bs->bl.write_zeroes_alignment) {
3253 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3254 /* Make a small request up to the first aligned sector. */
3255 num = bs->bl.write_zeroes_alignment;
3256 num -= sector_num % bs->bl.write_zeroes_alignment;
3257 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3258 /* Shorten the request to the last aligned sector. num cannot
3259 * underflow because num > bs->bl.write_zeroes_alignment.
3261 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3265 /* limit request size */
3266 if (num > max_write_zeroes) {
3267 num = max_write_zeroes;
3270 ret = -ENOTSUP;
3271 /* First try the efficient write zeroes operation */
3272 if (drv->bdrv_co_write_zeroes) {
3273 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3276 if (ret == -ENOTSUP) {
3277 /* Fall back to bounce buffer if write zeroes is unsupported */
3278 iov.iov_len = num * BDRV_SECTOR_SIZE;
3279 if (iov.iov_base == NULL) {
3280 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3281 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3283 qemu_iovec_init_external(&qiov, &iov, 1);
3285 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3287 /* Keep the bounce buffer around if it is big enough for all
3288 * future requests.
3290 if (num < max_write_zeroes) {
3291 qemu_vfree(iov.iov_base);
3292 iov.iov_base = NULL;
3296 sector_num += num;
3297 nb_sectors -= num;
3300 qemu_vfree(iov.iov_base);
3301 return ret;
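/* Example: with write_zeroes_alignment = 128, a request for sectors
 * [100, 400) is issued as a 28-sector head [100, 128) up to the first
 * aligned boundary, a 256-sector aligned body [128, 384), and finally the
 * 16-sector tail [384, 400). */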
3305 * Forwards an already correctly aligned write request to the BlockDriver.
3307 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3308 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3309 QEMUIOVector *qiov, int flags)
3311 BlockDriver *drv = bs->drv;
3312 bool waited;
3313 int ret;
3315 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3316 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3318 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3319 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3321 waited = wait_serialising_requests(req);
3322 assert(!waited || !req->serialising);
3323 assert(req->overlap_offset <= offset);
3324 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3326 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3328 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3329 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3330 qemu_iovec_is_zero(qiov)) {
3331 flags |= BDRV_REQ_ZERO_WRITE;
3332 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3333 flags |= BDRV_REQ_MAY_UNMAP;
3337 if (ret < 0) {
3338 /* Do nothing, write notifier decided to fail this request */
3339 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3340 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3341 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3342 } else {
3343 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3344 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3346 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3348 if (ret == 0 && !bs->enable_write_cache) {
3349 ret = bdrv_co_flush(bs);
3352 bdrv_set_dirty(bs, sector_num, nb_sectors);
3354 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3355 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3357 if (bs->growable && ret >= 0) {
3358 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3361 return ret;
3365 * Handle a write request in coroutine context
3367 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3368 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3369 BdrvRequestFlags flags)
3371 BdrvTrackedRequest req;
3372 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3373 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3374 uint8_t *head_buf = NULL;
3375 uint8_t *tail_buf = NULL;
3376 QEMUIOVector local_qiov;
3377 bool use_local_qiov = false;
3378 int ret;
3380 if (!bs->drv) {
3381 return -ENOMEDIUM;
3383 if (bs->read_only) {
3384 return -EACCES;
3386 if (bdrv_check_byte_request(bs, offset, bytes)) {
3387 return -EIO;
3390 /* throttling disk I/O */
3391 if (bs->io_limits_enabled) {
3392 bdrv_io_limits_intercept(bs, bytes, true);
3396 * Align write if necessary by performing a read-modify-write cycle.
3397 * Pad qiov with the read parts and be sure to have a tracked request not
3398 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3400 tracked_request_begin(&req, bs, offset, bytes, true);
3402 if (offset & (align - 1)) {
3403 QEMUIOVector head_qiov;
3404 struct iovec head_iov;
3406 mark_request_serialising(&req, align);
3407 wait_serialising_requests(&req);
3409 head_buf = qemu_blockalign(bs, align);
3410 head_iov = (struct iovec) {
3411 .iov_base = head_buf,
3412 .iov_len = align,
3414 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3416 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3417 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3418 align, &head_qiov, 0);
3419 if (ret < 0) {
3420 goto fail;
3422 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3424 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3425 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3426 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3427 use_local_qiov = true;
3429 bytes += offset & (align - 1);
3430 offset = offset & ~(align - 1);
3433 if ((offset + bytes) & (align - 1)) {
3434 QEMUIOVector tail_qiov;
3435 struct iovec tail_iov;
3436 size_t tail_bytes;
3437 bool waited;
3439 mark_request_serialising(&req, align);
3440 waited = wait_serialising_requests(&req);
3441 assert(!waited || !use_local_qiov);
3443 tail_buf = qemu_blockalign(bs, align);
3444 tail_iov = (struct iovec) {
3445 .iov_base = tail_buf,
3446 .iov_len = align,
3448 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3450 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3451 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3452 align, &tail_qiov, 0);
3453 if (ret < 0) {
3454 goto fail;
3456 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3458 if (!use_local_qiov) {
3459 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3460 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3461 use_local_qiov = true;
3464 tail_bytes = (offset + bytes) & (align - 1);
3465 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3467 bytes = ROUND_UP(bytes, align);
3470 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3471 use_local_qiov ? &local_qiov : qiov,
3472 flags);
3474 fail:
3475 tracked_request_end(&req);
3477 if (use_local_qiov) {
3478 qemu_iovec_destroy(&local_qiov);
3480 qemu_vfree(head_buf);
3481 qemu_vfree(tail_buf);
3483 return ret;
3486 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3487 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3488 BdrvRequestFlags flags)
3490 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3491 return -EINVAL;
3494 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3495 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3498 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3499 int nb_sectors, QEMUIOVector *qiov)
3501 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3503 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3506 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3507 int64_t sector_num, int nb_sectors,
3508 BdrvRequestFlags flags)
3510 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3512 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3513 flags &= ~BDRV_REQ_MAY_UNMAP;
3516 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3517 BDRV_REQ_ZERO_WRITE | flags);
3521 * Truncate file to 'offset' bytes (needed only for file protocols)
3523 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3525 BlockDriver *drv = bs->drv;
3526 int ret;
3527 if (!drv)
3528 return -ENOMEDIUM;
3529 if (!drv->bdrv_truncate)
3530 return -ENOTSUP;
3531 if (bs->read_only)
3532 return -EACCES;
3533 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
3534 return -EBUSY;
3536 ret = drv->bdrv_truncate(bs, offset);
3537 if (ret == 0) {
3538 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3539 bdrv_dev_resize_cb(bs);
3541 return ret;
3545 * Length of an allocated file in bytes. Sparse files are counted by actual
3546 * allocated space. Return < 0 if error or unknown.
3548 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3550 BlockDriver *drv = bs->drv;
3551 if (!drv) {
3552 return -ENOMEDIUM;
3554 if (drv->bdrv_get_allocated_file_size) {
3555 return drv->bdrv_get_allocated_file_size(bs);
3557 if (bs->file) {
3558 return bdrv_get_allocated_file_size(bs->file);
3560 return -ENOTSUP;
3564 * Length of a file in bytes. Return < 0 if error or unknown.
3566 int64_t bdrv_getlength(BlockDriverState *bs)
3568 BlockDriver *drv = bs->drv;
3569 if (!drv)
3570 return -ENOMEDIUM;
3572 if (drv->has_variable_length) {
3573 int ret = refresh_total_sectors(bs, bs->total_sectors);
3574 if (ret < 0) {
3575 return ret;
3578 return bs->total_sectors * BDRV_SECTOR_SIZE;
3581 /* return 0 as number of sectors if no device present or error */
3582 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3584 int64_t length;
3585 length = bdrv_getlength(bs);
3586 if (length < 0)
3587 length = 0;
3588 else
3589 length = length >> BDRV_SECTOR_BITS;
3590 *nb_sectors_ptr = length;
3593 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3594 BlockdevOnError on_write_error)
3596 bs->on_read_error = on_read_error;
3597 bs->on_write_error = on_write_error;
3600 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3602 return is_read ? bs->on_read_error : bs->on_write_error;
3605 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3607 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3609 switch (on_err) {
3610 case BLOCKDEV_ON_ERROR_ENOSPC:
3611 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
3612 case BLOCKDEV_ON_ERROR_STOP:
3613 return BDRV_ACTION_STOP;
3614 case BLOCKDEV_ON_ERROR_REPORT:
3615 return BDRV_ACTION_REPORT;
3616 case BLOCKDEV_ON_ERROR_IGNORE:
3617 return BDRV_ACTION_IGNORE;
3618 default:
3619 abort();
3623 /* This is done by device models because, while the block layer knows
3624 * about the error, it does not know whether an operation comes from
3625 * the device or the block layer (from a job, for example).
3627 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3628 bool is_read, int error)
3630 assert(error >= 0);
3631 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
3632 if (action == BDRV_ACTION_STOP) {
3633 vm_stop(RUN_STATE_IO_ERROR);
3634 bdrv_iostatus_set_err(bs, error);
3638 int bdrv_is_read_only(BlockDriverState *bs)
3640 return bs->read_only;
3643 int bdrv_is_sg(BlockDriverState *bs)
3645 return bs->sg;
3648 int bdrv_enable_write_cache(BlockDriverState *bs)
3650 return bs->enable_write_cache;
3653 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3655 bs->enable_write_cache = wce;
3657 /* so a reopen() will preserve wce */
3658 if (wce) {
3659 bs->open_flags |= BDRV_O_CACHE_WB;
3660 } else {
3661 bs->open_flags &= ~BDRV_O_CACHE_WB;
3665 int bdrv_is_encrypted(BlockDriverState *bs)
3667 if (bs->backing_hd && bs->backing_hd->encrypted)
3668 return 1;
3669 return bs->encrypted;
3672 int bdrv_key_required(BlockDriverState *bs)
3674 BlockDriverState *backing_hd = bs->backing_hd;
3676 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3677 return 1;
3678 return (bs->encrypted && !bs->valid_key);
3681 int bdrv_set_key(BlockDriverState *bs, const char *key)
3683 int ret;
3684 if (bs->backing_hd && bs->backing_hd->encrypted) {
3685 ret = bdrv_set_key(bs->backing_hd, key);
3686 if (ret < 0)
3687 return ret;
3688 if (!bs->encrypted)
3689 return 0;
3691 if (!bs->encrypted) {
3692 return -EINVAL;
3693 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3694 return -ENOMEDIUM;
3696 ret = bs->drv->bdrv_set_key(bs, key);
3697 if (ret < 0) {
3698 bs->valid_key = 0;
3699 } else if (!bs->valid_key) {
3700 bs->valid_key = 1;
3701 /* call the change callback now, we skipped it on open */
3702 bdrv_dev_change_media_cb(bs, true);
3704 return ret;
3707 const char *bdrv_get_format_name(BlockDriverState *bs)
3709 return bs->drv ? bs->drv->format_name : NULL;
3712 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3713 void *opaque)
3715 BlockDriver *drv;
3716 int count = 0;
3717 const char **formats = NULL;
3719 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3720 if (drv->format_name) {
3721 bool found = false;
3722 int i = count;
3723 while (formats && i && !found) {
3724 found = !strcmp(formats[--i], drv->format_name);
3727 if (!found) {
3728 formats = g_realloc(formats, (count + 1) * sizeof(char *));
3729 formats[count++] = drv->format_name;
3730 it(opaque, drv->format_name);
3734 g_free(formats);
3737 /* This function looks up a block backend BDS by its device name */
3738 BlockDriverState *bdrv_find(const char *name)
3740 BlockDriverState *bs;
3742 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3743 if (!strcmp(name, bs->device_name)) {
3744 return bs;
3747 return NULL;
3750 /* This function looks up a node in the BDS graph by its node name */
3751 BlockDriverState *bdrv_find_node(const char *node_name)
3753 BlockDriverState *bs;
3755 assert(node_name);
3757 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3758 if (!strcmp(node_name, bs->node_name)) {
3759 return bs;
3762 return NULL;
3765 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3766 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3768 BlockDeviceInfoList *list, *entry;
3769 BlockDriverState *bs;
3771 list = NULL;
3772 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3773 entry = g_malloc0(sizeof(*entry));
3774 entry->value = bdrv_block_device_info(bs);
3775 entry->next = list;
3776 list = entry;
3779 return list;
3782 BlockDriverState *bdrv_lookup_bs(const char *device,
3783 const char *node_name,
3784 Error **errp)
3786 BlockDriverState *bs = NULL;
3788 if (device) {
3789 bs = bdrv_find(device);
3791 if (bs) {
3792 return bs;
3796 if (node_name) {
3797 bs = bdrv_find_node(node_name);
3799 if (bs) {
3800 return bs;
3804 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3805 device ? device : "",
3806 node_name ? node_name : "");
3807 return NULL;
3810 BlockDriverState *bdrv_next(BlockDriverState *bs)
3812 if (!bs) {
3813 return QTAILQ_FIRST(&bdrv_states);
3815 return QTAILQ_NEXT(bs, device_list);
3818 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3820 BlockDriverState *bs;
3822 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3823 it(opaque, bs);
3827 const char *bdrv_get_device_name(BlockDriverState *bs)
3829 return bs->device_name;
3832 int bdrv_get_flags(BlockDriverState *bs)
3834 return bs->open_flags;
3837 int bdrv_flush_all(void)
3839 BlockDriverState *bs;
3840 int result = 0;
3842 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3843 AioContext *aio_context = bdrv_get_aio_context(bs);
3844 int ret;
3846 aio_context_acquire(aio_context);
3847 ret = bdrv_flush(bs);
3848 if (ret < 0 && !result) {
3849 result = ret;
3851 aio_context_release(aio_context);
3854 return result;
3857 int bdrv_has_zero_init_1(BlockDriverState *bs)
3859 return 1;
3862 int bdrv_has_zero_init(BlockDriverState *bs)
3864 assert(bs->drv);
3866 /* If BS is a copy on write image, it is initialized to
3867 the contents of the base image, which may not be zeroes. */
3868 if (bs->backing_hd) {
3869 return 0;
3871 if (bs->drv->bdrv_has_zero_init) {
3872 return bs->drv->bdrv_has_zero_init(bs);
3875 /* safe default */
3876 return 0;
3879 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3881 BlockDriverInfo bdi;
3883 if (bs->backing_hd) {
3884 return false;
3887 if (bdrv_get_info(bs, &bdi) == 0) {
3888 return bdi.unallocated_blocks_are_zero;
3891 return false;
3894 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3896 BlockDriverInfo bdi;
3898 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3899 return false;
3902 if (bdrv_get_info(bs, &bdi) == 0) {
3903 return bdi.can_write_zeroes_with_unmap;
3906 return false;
3909 typedef struct BdrvCoGetBlockStatusData {
3910 BlockDriverState *bs;
3911 BlockDriverState *base;
3912 int64_t sector_num;
3913 int nb_sectors;
3914 int *pnum;
3915 int64_t ret;
3916 bool done;
3917 } BdrvCoGetBlockStatusData;
3920 * Returns true iff the specified sector is present in the disk image. Drivers
3921 * not implementing the functionality are assumed to not support backing files,
3922 * hence all their sectors are reported as allocated.
3924 * If 'sector_num' is beyond the end of the disk image the return value is 0
3925 * and 'pnum' is set to 0.
3927 * 'pnum' is set to the number of sectors (including and immediately following
3928 * the specified sector) that are known to be in the same
3929 * allocated/unallocated state.
3931 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3932 * beyond the end of the disk image it will be clamped.
3934 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3935 int64_t sector_num,
3936 int nb_sectors, int *pnum)
3938 int64_t length;
3939 int64_t n;
3940 int64_t ret, ret2;
3942 length = bdrv_getlength(bs);
3943 if (length < 0) {
3944 return length;
3947 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
3948 *pnum = 0;
3949 return 0;
3952 n = bs->total_sectors - sector_num;
3953 if (n < nb_sectors) {
3954 nb_sectors = n;
3957 if (!bs->drv->bdrv_co_get_block_status) {
3958 *pnum = nb_sectors;
3959 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
3960 if (bs->drv->protocol_name) {
3961 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3963 return ret;
3966 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3967 if (ret < 0) {
3968 *pnum = 0;
3969 return ret;
3972 if (ret & BDRV_BLOCK_RAW) {
3973 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3974 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3975 *pnum, pnum);
3978 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
3979 ret |= BDRV_BLOCK_ALLOCATED;
3982 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3983 if (bdrv_unallocated_blocks_are_zero(bs)) {
3984 ret |= BDRV_BLOCK_ZERO;
3985 } else if (bs->backing_hd) {
3986 BlockDriverState *bs2 = bs->backing_hd;
3987 int64_t length2 = bdrv_getlength(bs2);
3988 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3989 ret |= BDRV_BLOCK_ZERO;
3994 if (bs->file &&
3995 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3996 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3997 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3998 *pnum, pnum);
3999 if (ret2 >= 0) {
4000 /* Ignore errors. This is just providing extra information; it
4001 * is useful but not necessary.
4003 ret |= (ret2 & BDRV_BLOCK_ZERO);
4007 return ret;
4010 /* Coroutine wrapper for bdrv_get_block_status() */
4011 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4013 BdrvCoGetBlockStatusData *data = opaque;
4014 BlockDriverState *bs = data->bs;
4016 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4017 data->pnum);
4018 data->done = true;
4022 * Synchronous wrapper around bdrv_co_get_block_status().
4024 * See bdrv_co_get_block_status() for details.
4026 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4027 int nb_sectors, int *pnum)
4029 Coroutine *co;
4030 BdrvCoGetBlockStatusData data = {
4031 .bs = bs,
4032 .sector_num = sector_num,
4033 .nb_sectors = nb_sectors,
4034 .pnum = pnum,
4035 .done = false,
4038 if (qemu_in_coroutine()) {
4039 /* Fast-path if already in coroutine context */
4040 bdrv_get_block_status_co_entry(&data);
4041 } else {
4042 AioContext *aio_context = bdrv_get_aio_context(bs);
4044 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4045 qemu_coroutine_enter(co, &data);
4046 while (!data.done) {
4047 aio_poll(aio_context, true);
4050 return data.ret;
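/* Example (illustrative sketch): walking an image's allocation map in the
 * spirit of "qemu-img map". Each iteration advances by the number of
 * sectors that share the same state; the 65536-sector chunk size is an
 * arbitrary choice for this sketch. */
static void example_dump_map(BlockDriverState *bs)
{
    int64_t total_sectors, sector_num = 0;
    int64_t len = bdrv_getlength(bs);

    if (len < 0) {
        return;
    }
    total_sectors = len >> BDRV_SECTOR_BITS;

    while (sector_num < total_sectors) {
        int n;
        int64_t ret = bdrv_get_block_status(bs, sector_num, 65536, &n);

        if (ret < 0 || n == 0) {
            return;
        }
        printf("sector %" PRId64 ": %sallocated%s\n", sector_num,
               (ret & BDRV_BLOCK_ALLOCATED) ? "" : "un",
               (ret & BDRV_BLOCK_ZERO) ? " (reads as zero)" : "");
        sector_num += n;
    }
}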
4053 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4054 int nb_sectors, int *pnum)
4056 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4057 if (ret < 0) {
4058 return ret;
4060 return (ret & BDRV_BLOCK_ALLOCATED);
4064 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4066 * Return true if the given sector is allocated in any image between
4067 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4068 * sector is allocated in any image of the chain. Return false otherwise.
4070 * 'pnum' is set to the number of sectors (including and immediately following
4071 * the specified sector) that are known to be in the same
4072 * allocated/unallocated state.
4075 int bdrv_is_allocated_above(BlockDriverState *top,
4076 BlockDriverState *base,
4077 int64_t sector_num,
4078 int nb_sectors, int *pnum)
4080 BlockDriverState *intermediate;
4081 int ret, n = nb_sectors;
4083 intermediate = top;
4084 while (intermediate && intermediate != base) {
4085 int pnum_inter;
4086 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4087 &pnum_inter);
4088 if (ret < 0) {
4089 return ret;
4090 } else if (ret) {
4091 *pnum = pnum_inter;
4092 return 1;
4096 * [sector_num, nb_sectors] is unallocated on top but intermediate
4097 * might have
4099 * [sector_num+x, nb_sectors] allocated.
4101 if (n > pnum_inter &&
4102 (intermediate == top ||
4103 sector_num + pnum_inter < intermediate->total_sectors)) {
4104 n = pnum_inter;
4107 intermediate = intermediate->backing_hd;
4110 *pnum = n;
4111 return 0;
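/* Example (illustrative sketch): asking whether any layer of the chain holds
 * data for a given sector, as a block job might before deciding to copy it.
 * A NULL base means "search all the way down the chain". */
static bool example_sector_has_data(BlockDriverState *top, int64_t sector_num)
{
    int pnum;

    return bdrv_is_allocated_above(top, NULL, sector_num, 1, &pnum) > 0;
}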
4114 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4116 if (bs->backing_hd && bs->backing_hd->encrypted)
4117 return bs->backing_file;
4118 else if (bs->encrypted)
4119 return bs->filename;
4120 else
4121 return NULL;
4124 void bdrv_get_backing_filename(BlockDriverState *bs,
4125 char *filename, int filename_size)
4127 pstrcpy(filename, filename_size, bs->backing_file);
4130 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4131 const uint8_t *buf, int nb_sectors)
4133 BlockDriver *drv = bs->drv;
4134 if (!drv)
4135 return -ENOMEDIUM;
4136 if (!drv->bdrv_write_compressed)
4137 return -ENOTSUP;
4138 if (bdrv_check_request(bs, sector_num, nb_sectors))
4139 return -EIO;
4141 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4143 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4146 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4148 BlockDriver *drv = bs->drv;
4149 if (!drv)
4150 return -ENOMEDIUM;
4151 if (!drv->bdrv_get_info)
4152 return -ENOTSUP;
4153 memset(bdi, 0, sizeof(*bdi));
4154 return drv->bdrv_get_info(bs, bdi);
4157 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4159 BlockDriver *drv = bs->drv;
4160 if (drv && drv->bdrv_get_specific_info) {
4161 return drv->bdrv_get_specific_info(bs);
4163 return NULL;
4166 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4167 int64_t pos, int size)
4169 QEMUIOVector qiov;
4170 struct iovec iov = {
4171 .iov_base = (void *) buf,
4172 .iov_len = size,
4175 qemu_iovec_init_external(&qiov, &iov, 1);
4176 return bdrv_writev_vmstate(bs, &qiov, pos);
4179 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4181 BlockDriver *drv = bs->drv;
4183 if (!drv) {
4184 return -ENOMEDIUM;
4185 } else if (drv->bdrv_save_vmstate) {
4186 return drv->bdrv_save_vmstate(bs, qiov, pos);
4187 } else if (bs->file) {
4188 return bdrv_writev_vmstate(bs->file, qiov, pos);
4191 return -ENOTSUP;
4194 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4195 int64_t pos, int size)
4197 BlockDriver *drv = bs->drv;
4198 if (!drv)
4199 return -ENOMEDIUM;
4200 if (drv->bdrv_load_vmstate)
4201 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4202 if (bs->file)
4203 return bdrv_load_vmstate(bs->file, buf, pos, size);
4204 return -ENOTSUP;
4207 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4209 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4210 return;
4213 bs->drv->bdrv_debug_event(bs, event);
4216 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4217 const char *tag)
4219 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4220 bs = bs->file;
4223 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4224 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4227 return -ENOTSUP;
4230 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4232 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4233 bs = bs->file;
4236 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4237 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4240 return -ENOTSUP;
4243 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4245 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4246 bs = bs->file;
4249 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4250 return bs->drv->bdrv_debug_resume(bs, tag);
4253 return -ENOTSUP;
4256 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4258 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4259 bs = bs->file;
4262 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4263 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4266 return false;
4269 int bdrv_is_snapshot(BlockDriverState *bs)
4271 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4274 /* backing_file can either be relative, or absolute, or a protocol. If it is
4275 * relative, it must be relative to the chain. So, passing in bs->filename
4276 * from a BDS as backing_file should not be done, as that may be relative to
4277 * the CWD rather than the chain. */
4278 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4279 const char *backing_file)
4281 char *filename_full = NULL;
4282 char *backing_file_full = NULL;
4283 char *filename_tmp = NULL;
4284 int is_protocol = 0;
4285 BlockDriverState *curr_bs = NULL;
4286 BlockDriverState *retval = NULL;
4288 if (!bs || !bs->drv || !backing_file) {
4289 return NULL;
4292 filename_full = g_malloc(PATH_MAX);
4293 backing_file_full = g_malloc(PATH_MAX);
4294 filename_tmp = g_malloc(PATH_MAX);
4296 is_protocol = path_has_protocol(backing_file);
4298 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4300 /* If either of the filename paths is actually a protocol, then
4301 * compare unmodified paths; otherwise make paths relative */
4302 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4303 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4304 retval = curr_bs->backing_hd;
4305 break;
4307 } else {
4308 /* If not an absolute filename path, make it relative to the current
4309 * image's filename path */
4310 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4311 backing_file);
4313 /* We are going to compare absolute pathnames */
4314 if (!realpath(filename_tmp, filename_full)) {
4315 continue;
4318 /* We need to make sure the backing filename we are comparing against
4319 * is relative to the current image filename (or absolute) */
4320 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4321 curr_bs->backing_file);
4323 if (!realpath(filename_tmp, backing_file_full)) {
4324 continue;
4327 if (strcmp(backing_file_full, filename_full) == 0) {
4328 retval = curr_bs->backing_hd;
4329 break;
4334 g_free(filename_full);
4335 g_free(backing_file_full);
4336 g_free(filename_tmp);
4337 return retval;

int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}

/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}

typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests are sequential or overlapping. If so,
    // combine them.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We should not need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
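
/*
 * A worked example of the merge above (illustrative values, not from this
 * file): two requests {sector 0, 8 sectors} and {sector 8, 8 sectors} are
 * exactly sequential (8 <= 0 + 8), so they merge into one request
 * {sector 0, 16 sectors} whose qiov concatenates both vectors.  A third
 * request at sector 32 would leave a gap (32 > 16), so it stays separate
 * and multiwrite_merge() returns 2.
 */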

/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error, this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
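
/*
 * A minimal caller sketch (hypothetical; my_write_done, ctx0/ctx1 and the
 * sector/qiov values are illustrative only).  Each request carries its own
 * completion callback, and the array may be reordered or merged by the
 * implementation:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_done, .opaque = ctx0 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_done, .opaque = ctx1 },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // check reqs[i].error to see which callbacks will still run
 *     }
 */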

void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBSync),
    .cancel     = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)
{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}

typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        aio_poll(aio_context, true);
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
    .cancel     = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
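
/*
 * A minimal caller sketch (hypothetical; my_flush_done is illustrative,
 * not part of this file).  The callback runs from a bottom half in the
 * BDS's AioContext once bdrv_co_flush() completes:
 *
 *     static void my_flush_done(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             // flush failed; ret is a negative errno
 *         }
 *     }
 *
 *     bdrv_aio_flush(bs, my_flush_done, NULL);
 */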

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}

/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}
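
/*
 * The pattern above is worth spelling out: a coroutine that needs the
 * result of a callback-style AIO call stores its own handle in a
 * stack-allocated CoroutineIOCompletion, yields, and lets the completion
 * callback re-enter it with the return value.  A hedged sketch of the same
 * idiom for any AIO-style API (submit_aio() is a hypothetical placeholder,
 * not part of this file):
 *
 *     CoroutineIOCompletion co = {
 *         .coroutine = qemu_coroutine_self(),
 *     };
 *
 *     submit_aio(bs, bdrv_co_io_em_complete, &co);
 *     qemu_coroutine_yield();   // resumed by bdrv_co_io_em_complete()
 *     return co.ret;            // the ret value passed to the callback
 */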

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}

void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    Error *local_err = NULL;
    int ret;

    if (!bs->drv) {
        return;
    }

    if (bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs, &local_err);
    } else if (bs->file) {
        bdrv_invalidate_cache(bs->file, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        return;
    }
}

void bdrv_invalidate_cache_all(Error **errp)
{
    BlockDriverState *bs;
    Error *local_err = NULL;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_invalidate_cache(bs, &local_err);
        aio_context_release(aio_context);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bs->open_flags &= ~BDRV_O_INCOMING;
        aio_context_release(aio_context);
    }
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard;

    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockDriverAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}
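
/*
 * A worked example of the chunking above (illustrative numbers): with
 * bl.discard_alignment = 8 and max_discard = 16, a request for sectors
 * [5, 30) starts with num = 25.  The alignment step trims the first chunk
 * to 8 - (5 % 8) = 3 sectors so the next chunk starts at sector 8; the
 * following iterations then issue aligned chunks of at most 16 sectors:
 * [8, 24) and [24, 30).
 */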

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return 0;
    }
    if (!drv->bdrv_is_inserted) {
        return 1;
    }
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}

/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}

void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
                                          Error **errp)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = bdrv_getlength(bs);
    if (bitmap_size < 0) {
        error_setg_errno(errp, -bitmap_size, "could not get length of device");
        errno = -bitmap_size;
        return NULL;
    }
    bitmap_size >>= BDRV_SECTOR_BITS;
    bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}

void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;
    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}

BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
        BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}

int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
{
    if (bitmap) {
        return hbitmap_get(bitmap->bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
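
/*
 * A minimal usage sketch for the dirty bitmap API above (hypothetical
 * caller; the 64 KiB granularity and the sector variable are illustrative).
 * Writes recorded via bdrv_set_dirty() become visible via bdrv_get_dirty():
 *
 *     Error *err = NULL;
 *     BdrvDirtyBitmap *bitmap;
 *
 *     bitmap = bdrv_create_dirty_bitmap(bs, 65536, &err);
 *     if (!bitmap) {
 *         // handle err
 *     }
 *     if (bdrv_get_dirty(bs, bitmap, sector)) {
 *         // sector was written since the bitmap was created
 *     }
 *     bdrv_release_dirty_bitmap(bs, bitmap);
 */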

/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}

struct BdrvOpBlocker {
    Error *reason;
    QLIST_ENTRY(BdrvOpBlocker) list;
};

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
{
    BdrvOpBlocker *blocker;
    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
    if (!QLIST_EMPTY(&bs->op_blockers[op])) {
        blocker = QLIST_FIRST(&bs->op_blockers[op]);
        if (errp) {
            error_setg(errp, "Device '%s' is busy: %s",
                       bs->device_name, error_get_pretty(blocker->reason));
        }
        return true;
    }
    return false;
}

void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
{
    BdrvOpBlocker *blocker;
    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);

    blocker = g_malloc0(sizeof(BdrvOpBlocker));
    blocker->reason = reason;
    QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
}

void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
{
    BdrvOpBlocker *blocker, *next;
    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
    QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
        if (blocker->reason == reason) {
            QLIST_REMOVE(blocker, list);
            g_free(blocker);
        }
    }
}

void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
{
    int i;
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        bdrv_op_block(bs, i, reason);
    }
}

void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
{
    int i;
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        bdrv_op_unblock(bs, i, reason);
    }
}

bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
{
    int i;

    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        if (!QLIST_EMPTY(&bs->op_blockers[i])) {
            return false;
        }
    }
    return true;
}
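
/*
 * A minimal usage sketch for the op blockers above (hypothetical job code;
 * the Error message is illustrative).  A job blocks an operation with a
 * reason, other code checks the blocker before acting, and the job later
 * unblocks with the same Error pointer:
 *
 *     Error *blocker = NULL;
 *     Error *local_err = NULL;
 *
 *     error_setg(&blocker, "block device is in use by a backup job");
 *     bdrv_op_block(bs, BLOCK_OP_TYPE_RESIZE, blocker);
 *
 *     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, &local_err)) {
 *         // refuse the resize; local_err names the blocker
 *     }
 *
 *     bdrv_op_unblock(bs, BLOCK_OP_TYPE_RESIZE, blocker);
 *     error_free(blocker);
 */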

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
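
/*
 * A minimal usage sketch (hypothetical device emulation code).  A cookie
 * brackets each request so bytes, op count, and latency accumulate per
 * I/O type on completion:
 *
 *     BlockAcctCookie cookie;
 *
 *     bdrv_acct_start(bs, &cookie, 4096, BDRV_ACCT_WRITE);
 *     // ... issue and complete the 4 KiB write ...
 *     bdrv_acct_done(bs, &cookie);
 */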

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            BlockDriverState *bs;
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = NULL;
            ret = bdrv_open(&bs, backing_file->value.s, NULL, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (local_err) {
        error_propagate(errp, local_err);
    }
}

AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    return bs->aio_context;
}

void bdrv_detach_aio_context(BlockDriverState *bs)
{
    if (!bs->drv) {
        return;
    }

    if (bs->drv->bdrv_detach_aio_context) {
        bs->drv->bdrv_detach_aio_context(bs);
    }
    if (bs->file) {
        bdrv_detach_aio_context(bs->file);
    }
    if (bs->backing_hd) {
        bdrv_detach_aio_context(bs->backing_hd);
    }

    bs->aio_context = NULL;
}

void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context)
{
    if (!bs->drv) {
        return;
    }

    bs->aio_context = new_context;

    if (bs->backing_hd) {
        bdrv_attach_aio_context(bs->backing_hd, new_context);
    }
    if (bs->file) {
        bdrv_attach_aio_context(bs->file, new_context);
    }
    if (bs->drv->bdrv_attach_aio_context) {
        bs->drv->bdrv_attach_aio_context(bs, new_context);
    }
}

void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
{
    bdrv_drain_all(); /* ensure there are no in-flight requests */

    bdrv_detach_aio_context(bs);

    /* This function executes in the old AioContext so acquire the new one in
     * case it runs in a different thread.
     */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context);
    aio_context_release(new_context);
}
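
/*
 * A minimal caller sketch (hypothetical dataplane setup code; the iothread
 * variable is illustrative).  Moving a device's BDS into an IOThread's
 * AioContext hands its file and backing nodes over in one call:
 *
 *     AioContext *ctx = iothread_get_aio_context(iothread);
 *
 *     bdrv_set_aio_context(bs, ctx);
 *     // from now on, completions for bs run in that IOThread
 */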

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}

/* This function is called by a block filter's
 * bdrv_recurse_is_first_non_filter method and by bdrv_is_first_non_filter.
 * It is used to test whether the given bs is the candidate, or to recurse
 * further down the node graph.
 */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    /* return false if basic checks fail */
    if (!bs || !bs->drv) {
        return false;
    }

    /* the code reached a non block filter driver -> check if the bs is
     * the same as the candidate. It's the recursion termination condition.
     */
    if (!bs->drv->is_filter) {
        return bs == candidate;
    }
    /* Down this path the driver is a block filter driver */

    /* If the block filter recursion method is defined use it to recurse down
     * the node graph.
     */
    if (bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    /* The driver is a block filter but does not allow recursion -> return
     * false
     */
    return false;
}

/* This function checks whether the candidate is the first non-filter bs down
 * its bs chain. Since we don't have pointers to parents, it explores all bs
 * chains from the top. Some filters can choose not to pass down the recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        /* try to recurse in this top level bs */
        perm = bdrv_recurse_is_first_non_filter(bs, candidate);

        /* candidate is the first non filter */
        if (perm) {
            return true;
        }
    }

    return false;
}
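
/*
 * An illustrative graph (hypothetical; "quorum" is one example of a filter
 * driver): with a quorum filter on top of two qcow2 children,
 *
 *     quorum (is_filter) -> { qcow2-A, qcow2-B }
 *
 * bdrv_is_first_non_filter(qcow2-A) walks each top-level bs, recurses
 * through the filter via its bdrv_recurse_is_first_non_filter method, and
 * returns true only if the filter passes the recursion down to qcow2-A.
 */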