qapi event: convert DEVICE_TRAY_MOVED
[qemu/cris-port.git] / block.c
/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/O requests */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
/* This function makes an I/O request wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* must this I/O wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or requests of this type are already throttled,
     * queue this I/O too */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue the next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
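
/*
 * A minimal sketch of the throttling lifecycle, assuming a caller that
 * already has a valid BlockDriverState *bs and a filled-in ThrottleConfig
 * cfg (both hypothetical here); the read/write paths then funnel requests
 * through bdrv_io_limits_intercept() automatically:
 *
 *     bdrv_io_limits_enable(bs);      // create timers, mark limits active
 *     bdrv_set_io_limits(bs, &cfg);   // apply the configured limits
 *     ...                             // guest I/O is throttled from now on
 *     bdrv_io_limits_disable(bs);     // drain queued requests, drop timers
 */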
size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
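
/*
 * A minimal usage sketch (the file names are made up for illustration):
 * given a base image at "/images/base.qcow2" whose backing file is
 * recorded as the relative name "snap.qcow2",
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "snap.qcow2");
 *
 * leaves "/images/snap.qcow2" in dest, while an absolute or
 * protocol-prefixed filename would be copied through unchanged.
 */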
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;
    int i;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;
static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
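
/*
 * A minimal usage sketch; the image name and size are made up for
 * illustration. Create a 1 GiB qcow2 image, letting the driver's create
 * options carry the size:
 *
 *     Error *err = NULL;
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QemuOpts *opts = qemu_opts_create(drv->create_opts, NULL, 0,
 *                                       &error_abort);
 *     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, 1 * 1024 * 1024 * 1024);
 *     if (bdrv_create(drv, "test.qcow2", opts, &err) < 0) {
 *         error_report("%s", error_get_pretty(err));
 *     }
 *     qemu_opts_del(opts);
 *
 * bdrv_append_temp_snapshot() below uses exactly this pattern for its
 * temporary overlay.
 */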
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
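
/*
 * For illustration (file names hypothetical): with
 * allow_protocol_prefix=true, "nbd:localhost:10809" extracts the protocol
 * "nbd" and returns the matching driver, while "disk.img" has no
 * "<protocol>:" prefix and falls back to the "file" driver. Note that the
 * host-device probe above runs first, so "/dev/cdrom" still resolves to a
 * host CDROM driver even though it contains no protocol prefix.
 */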
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
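
/*
 * A quick reference for the mapping above, derived directly from the code:
 *
 *     mode          flags added to *flags
 *     ------------  --------------------------------
 *     off/none      BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *     directsync    BDRV_O_NOCACHE
 *     writeback     BDRV_O_CACHE_WB
 *     unsafe        BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 *     writethrough  (none; this is the default)
 *
 * So e.g. bdrv_parse_cache_flags("none", &flags) bypasses the host page
 * cache while keeping the guest-visible write cache enabled.
 */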
/*
 * The copy-on-read flag is actually a reference count, so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files are always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}
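
/*
 * To make the propagation concrete: a hypothetical top layer opened with
 * (BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_COPY_ON_READ) hands down
 *
 *     bs->file       -> BDRV_O_RDWR | BDRV_O_PROTOCOL | BDRV_O_CACHE_WB |
 *                       BDRV_O_UNMAP        (snapshot/COR stripped)
 *     bs->backing_hd -> read-only, with snapshot and COR stripped as well
 *
 * while the temporary snapshot overlay itself keeps the flags minus
 * BDRV_O_SNAPSHOT, plus BDRV_O_TEMPORARY.
 */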
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* an empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return;
    }

    /* avoid collisions with the device-id namespace */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* avoid duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy the node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is an indirect pointer to a QDict of options to pass to the block
 * drivers, or pointer to NULL for an empty set of options. If this function
 * takes ownership of the QDict reference, it will set *options to NULL;
 * otherwise, it will contain unused/unrecognized options after this function
 * returns. Then, the caller is responsible for freeing it. If it intends to
 * reuse the QDict, QINCREF() should be called beforehand.
 */
static int bdrv_file_open(BlockDriverState *bs, const char *filename,
                          QDict **options, int flags, Error **errp)
{
    BlockDriver *drv;
    const char *drvname;
    bool parse_filename = false;
    Error *local_err = NULL;
    int ret;

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(*options, "filename");
    } else if (filename && !qdict_haskey(*options, "filename")) {
        qdict_put(*options, "filename", qstring_from_str(filename));
        parse_filename = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(*options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(*options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, parse_filename);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        } else {
            filename = qdict_get_str(*options, "filename");
        }
    }

    if (!drv->bdrv_file_open) {
        ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
        *options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    bs->growable = 1;
    return 0;

fail:
    return ret;
}
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{
    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bs->device_name);
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs);
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriver *back_drv = NULL;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    backing_hd = bdrv_new("", errp);

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    BlockDriver *bdrv_qcow2;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err = NULL;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }
    total_size &= BDRV_SECTOR_MASK;

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    bdrv_qcow2 = bdrv_find_format("qcow2");
    opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new("", &error_abort);

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
}
static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
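
/*
 * For illustration, a hypothetical "json:" pseudo-filename such as
 *
 *     json:{"driver": "qcow2",
 *           "file": {"driver": "file", "filename": "disk.qcow2"}}
 *
 * parses into a QDict which qdict_flatten() turns into the dotted keys
 * "driver", "file.driver" and "file.filename", i.e. the same form the
 * regular block device option parsing produces.
 */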
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new("", &error_abort);
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            ret = -EINVAL;
            goto fail;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(options, json_options, false);
        QDECREF(json_options);
        filename = NULL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    if (flags & BDRV_O_PROTOCOL) {
        assert(!drv);
        ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL,
                             &local_err);
        if (!ret) {
            drv = bs->drv;
            goto done;
        } else if (bs->drv) {
            goto close_and_fail;
        } else {
            goto fail;
        }
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }
    if (flags & BDRV_O_SNAPSHOT) {
        snapshot_flags = bdrv_temp_snapshot_flags(flags);
        flags = bdrv_backing_flags(flags);
    }

    assert(file == NULL);
    ret = bdrv_open_image(&file, filename, options, "file",
                          bdrv_inherited_flags(flags),
                          true, &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    if (!drv) {
        if (file) {
            ret = find_image_format(file, filename, &drv, &local_err);
        } else {
            error_setg(errp, "Must specify either driver or file");
            ret = -EINVAL;
            goto fail;
        }
    }

    if (!drv) {
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            goto close_and_fail;
        }
    }

done:
    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bs->device_name, entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or alternatively may be NULL, in which
 * case a new BlockReopenQueue will be created and initialized. This newly
 * created BlockReopenQueue should be passed back in for subsequent calls
 * that are intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags. All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
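
/*
 * A minimal sketch of the transactional pattern for more than one device
 * (bs1/bs2 and their flags are hypothetical): queue every BDS first, then
 * reopen them as one atomic set; bdrv_reopen() above is just the
 * single-device shorthand for this.
 *
 *     Error *err = NULL;
 *     BlockReopenQueue *queue;
 *     queue = bdrv_reopen_queue(NULL, bs1, flags1);
 *     queue = bdrv_reopen_queue(queue, bs2, flags2);
 *     if (bdrv_reopen_multiple(queue, &err) < 0) {
 *         // all prepared entries were rolled back via bdrv_reopen_abort()
 *     }
 */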
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver's .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            BlockDriverState *backing_hd = bs->backing_hd;
            bdrv_set_backing_hd(bs, NULL);
            bdrv_unref(backing_hd);
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_close(bs);
        aio_context_release(aio_context);
    }
}

/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        busy = false;

        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            AioContext *aio_context = bdrv_get_aio_context(bs);
            bool bs_busy;

            aio_context_acquire(aio_context);
            bdrv_start_throttled_reqs(bs);
            bs_busy = bdrv_requests_pending(bs);
            bs_busy |= aio_poll(aio_context, bs_busy);
            aio_context_release(aio_context);

            busy |= bs_busy;
        }
    }
}
/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists.
 * Also, NUL terminate the device_name to prevent a double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;
    memcpy(bs_dest->op_blockers, bs_src->op_blockers,
           sizeof(bs_dest->op_blockers));
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* The code needs to swap the node_name but simply swapping node_list won't
     * work so first remove the nodes from the graph list, do the swap then
     * insert them back if needed.
     */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
    }

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap! */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* insert the nodes back into the graph node list if needed */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
    }

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bdrv_set_backing_hd(bs_top, bs_new);
}
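
/*
 * In chain terms: if the device sees "base <- top" and bs_new holds a fresh
 * overlay, bdrv_append(bs_new, bs_top) leaves the device's pointer (bs_top)
 * holding the overlay's contents, backed by the old top:
 *
 *     before:  base <- top                (device holds bs_top)
 *     after:   base <- old top <- overlay (device still holds bs_top,
 *                                          whose contents are the overlay)
 *
 * This is how bdrv_append_temp_snapshot() above installs its temporary
 * qcow2 overlay for snapshot=on.
 */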
2078 static void bdrv_delete(BlockDriverState *bs)
2080 assert(!bs->dev);
2081 assert(!bs->job);
2082 assert(bdrv_op_blocker_is_empty(bs));
2083 assert(!bs->refcnt);
2084 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2086 bdrv_close(bs);
2088 /* remove from list, if necessary */
2089 bdrv_make_anon(bs);
2091 g_free(bs);
2094 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
2095 /* TODO change to DeviceState *dev when all users are qdevified */
2097 if (bs->dev) {
2098 return -EBUSY;
2100 bs->dev = dev;
2101 bdrv_iostatus_reset(bs);
2102 return 0;
2105 /* TODO qdevified devices don't use this, remove when devices are qdevified */
2106 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
2108 if (bdrv_attach_dev(bs, dev) < 0) {
2109 abort();
2113 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
2114 /* TODO change to DeviceState *dev when all users are qdevified */
2116 assert(bs->dev == dev);
2117 bs->dev = NULL;
2118 bs->dev_ops = NULL;
2119 bs->dev_opaque = NULL;
2120 bs->guest_block_size = 512;
2123 /* TODO change to return DeviceState * when all users are qdevified */
2124 void *bdrv_get_attached_dev(BlockDriverState *bs)
2126 return bs->dev;
2129 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2130 void *opaque)
2132 bs->dev_ops = ops;
2133 bs->dev_opaque = opaque;
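/* Illustrative usage sketch (assumed, not from the original file): a device
 * model registers callbacks so the block layer can report media changes and
 * query tray state; the initializer below is hypothetical:
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb = my_change_media_cb,
 *         .is_tray_open    = my_is_tray_open,
 *     };
 *     bdrv_set_dev_ops(bs, &my_block_ops, my_dev_state);
 */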
2136 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
2137 enum MonitorEvent ev,
2138 BlockErrorAction action, bool is_read)
2140 QObject *data;
2141 const char *action_str;
2143 switch (action) {
2144 case BLOCK_ERROR_ACTION_REPORT:
2145 action_str = "report";
2146 break;
2147 case BLOCK_ERROR_ACTION_IGNORE:
2148 action_str = "ignore";
2149 break;
2150 case BLOCK_ERROR_ACTION_STOP:
2151 action_str = "stop";
2152 break;
2153 default:
2154 abort();
2157 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
2158 bdrv->device_name,
2159 action_str,
2160 is_read ? "read" : "write");
2161 monitor_protocol_event(ev, data);
2163 qobject_decref(data);
2166 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2168 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2169 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2170 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2171 if (tray_was_closed) {
2172 /* tray open */
2173 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2174 true, &error_abort);
2176 if (load) {
2177 /* tray close */
2178 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2179 false, &error_abort);
2184 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2186 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2189 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2191 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2192 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2196 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2198 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2199 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2201 return false;
2204 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2206 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2207 bs->dev_ops->resize_cb(bs->dev_opaque);
2211 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2213 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2214 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2216 return false;
2220 * Run consistency checks on an image
2222 * Returns 0 if the check could be completed (it doesn't mean that the image is
2223 * free of errors) or -errno when an internal error occurred. The results of the
2224 * check are stored in res.
2226 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2228 if (bs->drv->bdrv_check == NULL) {
2229 return -ENOTSUP;
2232 memset(res, 0, sizeof(*res));
2233 return bs->drv->bdrv_check(bs, res, fix);
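/* Illustrative usage sketch (assumed, not from the original file): run a
 * report-only consistency check; a fix mode of 0 requests no repairs:
 *
 *     BdrvCheckResult result;
 *     int ret = bdrv_check(bs, &result, 0);
 *     if (ret < 0) {
 *         ... internal error; result is not meaningful ...
 *     }
 */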
2236 #define COMMIT_BUF_SECTORS 2048
2238 /* commit COW file into the raw image */
2239 int bdrv_commit(BlockDriverState *bs)
2241 BlockDriver *drv = bs->drv;
2242 int64_t sector, total_sectors, length, backing_length;
2243 int n, ro, open_flags;
2244 int ret = 0;
2245 uint8_t *buf = NULL;
2246 char filename[PATH_MAX];
2248 if (!drv)
2249 return -ENOMEDIUM;
2251 if (!bs->backing_hd) {
2252 return -ENOTSUP;
2255 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2256 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2257 return -EBUSY;
2260 ro = bs->backing_hd->read_only;
2261 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2262 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2263 open_flags = bs->backing_hd->open_flags;
2265 if (ro) {
2266 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2267 return -EACCES;
2271 length = bdrv_getlength(bs);
2272 if (length < 0) {
2273 ret = length;
2274 goto ro_cleanup;
2277 backing_length = bdrv_getlength(bs->backing_hd);
2278 if (backing_length < 0) {
2279 ret = backing_length;
2280 goto ro_cleanup;
2283 /* If our top snapshot is larger than the backing file image,
2284 * grow the backing file image if possible. If not possible,
2285 * we must return an error */
2286 if (length > backing_length) {
2287 ret = bdrv_truncate(bs->backing_hd, length);
2288 if (ret < 0) {
2289 goto ro_cleanup;
2293 total_sectors = length >> BDRV_SECTOR_BITS;
2294 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2296 for (sector = 0; sector < total_sectors; sector += n) {
2297 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2298 if (ret < 0) {
2299 goto ro_cleanup;
2301 if (ret) {
2302 ret = bdrv_read(bs, sector, buf, n);
2303 if (ret < 0) {
2304 goto ro_cleanup;
2307 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2308 if (ret < 0) {
2309 goto ro_cleanup;
2314 if (drv->bdrv_make_empty) {
2315 ret = drv->bdrv_make_empty(bs);
2316 if (ret < 0) {
2317 goto ro_cleanup;
2319 bdrv_flush(bs);
2323 * Make sure all data we wrote to the backing device is actually
2324 * stable on disk.
2326 if (bs->backing_hd) {
2327 bdrv_flush(bs->backing_hd);
2330 ret = 0;
2331 ro_cleanup:
2332 g_free(buf);
2334 if (ro) {
2335 /* ignoring error return here */
2336 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2339 return ret;
2342 int bdrv_commit_all(void)
2344 BlockDriverState *bs;
2346 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2347 AioContext *aio_context = bdrv_get_aio_context(bs);
2349 aio_context_acquire(aio_context);
2350 if (bs->drv && bs->backing_hd) {
2351 int ret = bdrv_commit(bs);
2352 if (ret < 0) {
2353 aio_context_release(aio_context);
2354 return ret;
2357 aio_context_release(aio_context);
2359 return 0;
2362 /*
2363 * Remove an active request from the tracked requests list
2365 * This function should be called when a tracked request is completing.
2366 */
2367 static void tracked_request_end(BdrvTrackedRequest *req)
2369 if (req->serialising) {
2370 req->bs->serialising_in_flight--;
2373 QLIST_REMOVE(req, list);
2374 qemu_co_queue_restart_all(&req->wait_queue);
2377 /*
2378 * Add an active request to the tracked requests list
2379 */
2380 static void tracked_request_begin(BdrvTrackedRequest *req,
2381 BlockDriverState *bs,
2382 int64_t offset,
2383 unsigned int bytes, bool is_write)
2385 *req = (BdrvTrackedRequest){
2386 .bs = bs,
2387 .offset = offset,
2388 .bytes = bytes,
2389 .is_write = is_write,
2390 .co = qemu_coroutine_self(),
2391 .serialising = false,
2392 .overlap_offset = offset,
2393 .overlap_bytes = bytes,
2396 qemu_co_queue_init(&req->wait_queue);
2398 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2401 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2403 int64_t overlap_offset = req->offset & ~(align - 1);
2404 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2405 - overlap_offset;
2407 if (!req->serialising) {
2408 req->bs->serialising_in_flight++;
2409 req->serialising = true;
2412 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2413 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
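/* Worked example (added for illustration): with offset = 1536, bytes = 1024
 * and align = 4096, the serialised window becomes
 *     overlap_offset = 1536 & ~4095             = 0
 *     overlap_bytes  = ROUND_UP(2560, 4096) - 0 = 4096
 * i.e. the request serialises against the whole first 4 KiB block.
 */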
2416 /*
2417 * Round a region to cluster boundaries
2418 */
2419 void bdrv_round_to_clusters(BlockDriverState *bs,
2420 int64_t sector_num, int nb_sectors,
2421 int64_t *cluster_sector_num,
2422 int *cluster_nb_sectors)
2424 BlockDriverInfo bdi;
2426 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2427 *cluster_sector_num = sector_num;
2428 *cluster_nb_sectors = nb_sectors;
2429 } else {
2430 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2431 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2432 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2433 nb_sectors, c);
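/* Worked example (added for illustration): with a 64 KiB cluster size
 * (c = 128 sectors), sector_num = 100 and nb_sectors = 64 round to
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(100, 128)    = 0
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(100 + 64, 128) = 256
 * so the region [100, 164) grows to the cluster-aligned [0, 256).
 */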
2437 static int bdrv_get_cluster_size(BlockDriverState *bs)
2439 BlockDriverInfo bdi;
2440 int ret;
2442 ret = bdrv_get_info(bs, &bdi);
2443 if (ret < 0 || bdi.cluster_size == 0) {
2444 return bs->request_alignment;
2445 } else {
2446 return bdi.cluster_size;
2450 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2451 int64_t offset, unsigned int bytes)
2453 /* aaaa bbbb */
2454 if (offset >= req->overlap_offset + req->overlap_bytes) {
2455 return false;
2457 /* bbbb aaaa */
2458 if (req->overlap_offset >= offset + bytes) {
2459 return false;
2461 return true;
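/* Worked example (added for illustration): for a tracked request with
 * overlap_offset = 4096 and overlap_bytes = 4096 (covering [4096, 8192)),
 * a query at offset = 8192 fails the first test (8192 >= 8192) and a query
 * ending at or before 4096 fails the second, so only byte ranges that
 * actually intersect [4096, 8192) report an overlap.
 */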
2464 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2466 BlockDriverState *bs = self->bs;
2467 BdrvTrackedRequest *req;
2468 bool retry;
2469 bool waited = false;
2471 if (!bs->serialising_in_flight) {
2472 return false;
2475 do {
2476 retry = false;
2477 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2478 if (req == self || (!req->serialising && !self->serialising)) {
2479 continue;
2481 if (tracked_request_overlaps(req, self->overlap_offset,
2482 self->overlap_bytes))
2484 /* Hitting this means there was a reentrant request, for
2485 * example, a block driver issuing nested requests. This must
2486 * never happen since it means deadlock.
2488 assert(qemu_coroutine_self() != req->co);
2490 /* If the request is already (indirectly) waiting for us, or
2491 * will wait for us as soon as it wakes up, then just go on
2492 * (instead of producing a deadlock in the former case). */
2493 if (!req->waiting_for) {
2494 self->waiting_for = req;
2495 qemu_co_queue_wait(&req->wait_queue);
2496 self->waiting_for = NULL;
2497 retry = true;
2498 waited = true;
2499 break;
2503 } while (retry);
2505 return waited;
2509 * Return values:
2510 * 0 - success
2511 * -EINVAL - backing format specified, but no file
2512 * -ENOSPC - can't update the backing file because no space is left in the
2513 * image file header
2514 * -ENOTSUP - format driver doesn't support changing the backing file
2516 int bdrv_change_backing_file(BlockDriverState *bs,
2517 const char *backing_file, const char *backing_fmt)
2519 BlockDriver *drv = bs->drv;
2520 int ret;
2522 /* Backing file format doesn't make sense without a backing file */
2523 if (backing_fmt && !backing_file) {
2524 return -EINVAL;
2527 if (drv->bdrv_change_backing_file != NULL) {
2528 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2529 } else {
2530 ret = -ENOTSUP;
2533 if (ret == 0) {
2534 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2535 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2537 return ret;
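/* Illustrative usage sketch (assumed, not from the original file); the file
 * name and format below are hypothetical:
 *
 *     ret = bdrv_change_backing_file(bs, "base.qcow2", "qcow2");
 *     if (ret == -ENOTSUP) {
 *         ... the format driver cannot rewrite its backing file entry ...
 *     }
 */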
2541 * Finds the image layer in the chain that has 'bs' as its backing file.
2543 * active is the current topmost image.
2545 * Returns NULL if bs is not found in active's image chain,
2546 * or if active == bs.
2548 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2549 BlockDriverState *bs)
2551 BlockDriverState *overlay = NULL;
2552 BlockDriverState *intermediate;
2554 assert(active != NULL);
2555 assert(bs != NULL);
2557 /* if bs is the same as active, then by definition it has no overlay
2559 if (active == bs) {
2560 return NULL;
2563 intermediate = active;
2564 while (intermediate->backing_hd) {
2565 if (intermediate->backing_hd == bs) {
2566 overlay = intermediate;
2567 break;
2569 intermediate = intermediate->backing_hd;
2572 return overlay;
2575 typedef struct BlkIntermediateStates {
2576 BlockDriverState *bs;
2577 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2578 } BlkIntermediateStates;
2582 * Drops images above 'base' up to and including 'top', and sets the image
2583 * above 'top' to have base as its backing file.
2585 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2586 * information in 'bs' can be properly updated.
2588 * E.g., this will convert the following chain:
2589 * bottom <- base <- intermediate <- top <- active
2591 * to
2593 * bottom <- base <- active
2595 * It is allowed for bottom==base, in which case it converts:
2597 * base <- intermediate <- top <- active
2599 * to
2601 * base <- active
2603 * Error conditions:
2604 * if active == top, that is considered an error
2607 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2608 BlockDriverState *base)
2610 BlockDriverState *intermediate;
2611 BlockDriverState *base_bs = NULL;
2612 BlockDriverState *new_top_bs = NULL;
2613 BlkIntermediateStates *intermediate_state, *next;
2614 int ret = -EIO;
2616 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2617 QSIMPLEQ_INIT(&states_to_delete);
2619 if (!top->drv || !base->drv) {
2620 goto exit;
2623 new_top_bs = bdrv_find_overlay(active, top);
2625 if (new_top_bs == NULL) {
2626 /* we could not find the image above 'top', this is an error */
2627 goto exit;
2630 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2631 * to do, no intermediate images */
2632 if (new_top_bs->backing_hd == base) {
2633 ret = 0;
2634 goto exit;
2637 intermediate = top;
2639 /* Now go down through the chain, adding each BDS we find to our
2640 * deletion queue, until we hit 'base'.
2641 */
2642 while (intermediate) {
2643 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2644 intermediate_state->bs = intermediate;
2645 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2647 if (intermediate->backing_hd == base) {
2648 base_bs = intermediate->backing_hd;
2649 break;
2651 intermediate = intermediate->backing_hd;
2653 if (base_bs == NULL) {
2654 /* Something went wrong; we did not end at the base. Safely
2655 * unravel everything, and exit with an error. */
2656 goto exit;
2659 /* success - we can delete the intermediate states, and link top->base */
2660 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2661 base_bs->drv ? base_bs->drv->format_name : "");
2662 if (ret) {
2663 goto exit;
2665 bdrv_set_backing_hd(new_top_bs, base_bs);
2667 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2668 /* so that bdrv_close() does not recursively close the chain */
2669 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2670 bdrv_unref(intermediate_state->bs);
2672 ret = 0;
2674 exit:
2675 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2676 g_free(intermediate_state);
2678 return ret;
2682 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2683 size_t size)
2685 int64_t len;
2687 if (size > INT_MAX) {
2688 return -EIO;
2691 if (!bdrv_is_inserted(bs))
2692 return -ENOMEDIUM;
2694 if (bs->growable)
2695 return 0;
2697 len = bdrv_getlength(bs);
2699 if (offset < 0)
2700 return -EIO;
2702 if ((offset > len) || (len - offset < size))
2703 return -EIO;
2705 return 0;
2708 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2709 int nb_sectors)
2711 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2712 return -EIO;
2715 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2716 nb_sectors * BDRV_SECTOR_SIZE);
2719 typedef struct RwCo {
2720 BlockDriverState *bs;
2721 int64_t offset;
2722 QEMUIOVector *qiov;
2723 bool is_write;
2724 int ret;
2725 BdrvRequestFlags flags;
2726 } RwCo;
2728 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2730 RwCo *rwco = opaque;
2732 if (!rwco->is_write) {
2733 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2734 rwco->qiov->size, rwco->qiov,
2735 rwco->flags);
2736 } else {
2737 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2738 rwco->qiov->size, rwco->qiov,
2739 rwco->flags);
2744 * Process a vectored synchronous request using coroutines
2746 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2747 QEMUIOVector *qiov, bool is_write,
2748 BdrvRequestFlags flags)
2750 Coroutine *co;
2751 RwCo rwco = {
2752 .bs = bs,
2753 .offset = offset,
2754 .qiov = qiov,
2755 .is_write = is_write,
2756 .ret = NOT_DONE,
2757 .flags = flags,
2761 * In sync call context, when the vcpu is blocked, the throttling timer
2762 * will not fire, so the I/O throttling function has to be disabled here
2763 * if it has been enabled.
2764 */
2765 if (bs->io_limits_enabled) {
2766 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2767 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2768 bdrv_io_limits_disable(bs);
2771 if (qemu_in_coroutine()) {
2772 /* Fast-path if already in coroutine context */
2773 bdrv_rw_co_entry(&rwco);
2774 } else {
2775 AioContext *aio_context = bdrv_get_aio_context(bs);
2777 co = qemu_coroutine_create(bdrv_rw_co_entry);
2778 qemu_coroutine_enter(co, &rwco);
2779 while (rwco.ret == NOT_DONE) {
2780 aio_poll(aio_context, true);
2783 return rwco.ret;
2787 * Process a synchronous request using coroutines
2789 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2790 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2792 QEMUIOVector qiov;
2793 struct iovec iov = {
2794 .iov_base = (void *)buf,
2795 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2798 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2799 return -EINVAL;
2802 qemu_iovec_init_external(&qiov, &iov, 1);
2803 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2804 &qiov, is_write, flags);
2807 /* return < 0 if error. See bdrv_write() for the return codes */
2808 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2809 uint8_t *buf, int nb_sectors)
2811 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2814 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2815 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2816 uint8_t *buf, int nb_sectors)
2818 bool enabled;
2819 int ret;
2821 enabled = bs->io_limits_enabled;
2822 bs->io_limits_enabled = false;
2823 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2824 bs->io_limits_enabled = enabled;
2825 return ret;
2828 /* Return < 0 if error. Important errors are:
2829 -EIO generic I/O error (may happen for all errors)
2830 -ENOMEDIUM No media inserted.
2831 -EINVAL Invalid sector number or nb_sectors
2832 -EACCES Trying to write a read-only device
2834 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2835 const uint8_t *buf, int nb_sectors)
2837 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2840 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2841 int nb_sectors, BdrvRequestFlags flags)
2843 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2844 BDRV_REQ_ZERO_WRITE | flags);
2848 * Completely zero out a block device with the help of bdrv_write_zeroes.
2849 * The operation is sped up by checking the block status and only writing
2850 * zeroes to regions that do not already read back as zeroes. Optional
2851 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2853 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2854 */
2855 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2857 int64_t target_size;
2858 int64_t ret, nb_sectors, sector_num = 0;
2859 int n;
2861 target_size = bdrv_getlength(bs);
2862 if (target_size < 0) {
2863 return target_size;
2865 target_size /= BDRV_SECTOR_SIZE;
2867 for (;;) {
2868 nb_sectors = target_size - sector_num;
2869 if (nb_sectors <= 0) {
2870 return 0;
2872 if (nb_sectors > INT_MAX) {
2873 nb_sectors = INT_MAX;
2875 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2876 if (ret < 0) {
2877 error_report("error getting block status at sector %" PRId64 ": %s",
2878 sector_num, strerror(-ret));
2879 return ret;
2881 if (ret & BDRV_BLOCK_ZERO) {
2882 sector_num += n;
2883 continue;
2885 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2886 if (ret < 0) {
2887 error_report("error writing zeroes at sector %" PRId64 ": %s",
2888 sector_num, strerror(-ret));
2889 return ret;
2891 sector_num += n;
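/* Illustrative usage sketch (assumed, not from the original file): zero an
 * image while allowing the driver to unmap instead of writing zeroes:
 *
 *     ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         ... see bdrv_write() for the error codes ...
 *     }
 */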
2895 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2897 QEMUIOVector qiov;
2898 struct iovec iov = {
2899 .iov_base = (void *)buf,
2900 .iov_len = bytes,
2902 int ret;
2904 if (bytes < 0) {
2905 return -EINVAL;
2908 qemu_iovec_init_external(&qiov, &iov, 1);
2909 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2910 if (ret < 0) {
2911 return ret;
2914 return bytes;
2917 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2919 int ret;
2921 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2922 if (ret < 0) {
2923 return ret;
2926 return qiov->size;
2929 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2930 const void *buf, int bytes)
2932 QEMUIOVector qiov;
2933 struct iovec iov = {
2934 .iov_base = (void *) buf,
2935 .iov_len = bytes,
2938 if (bytes < 0) {
2939 return -EINVAL;
2942 qemu_iovec_init_external(&qiov, &iov, 1);
2943 return bdrv_pwritev(bs, offset, &qiov);
2947 * Writes to the file and ensures that no writes are reordered across this
2948 * request (acts as a barrier)
2950 * Returns 0 on success, -errno in error cases.
2952 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2953 const void *buf, int count)
2955 int ret;
2957 ret = bdrv_pwrite(bs, offset, buf, count);
2958 if (ret < 0) {
2959 return ret;
2962 /* No flush needed for cache modes that already do it */
2963 if (bs->enable_write_cache) {
2964 bdrv_flush(bs);
2967 return 0;
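/* Illustrative usage sketch (assumed, not from the original file): the
 * barrier semantics suit on-disk metadata updates that later writes must
 * not overtake; "header" is a hypothetical structure:
 *
 *     ret = bdrv_pwrite_sync(bs, 0, &header, sizeof(header));
 */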
2970 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2971 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2973 /* Perform I/O through a temporary buffer so that users who scribble over
2974 * their read buffer while the operation is in progress do not end up
2975 * modifying the image file. This is critical for zero-copy guest I/O
2976 * where anything might happen inside guest memory.
2978 void *bounce_buffer;
2980 BlockDriver *drv = bs->drv;
2981 struct iovec iov;
2982 QEMUIOVector bounce_qiov;
2983 int64_t cluster_sector_num;
2984 int cluster_nb_sectors;
2985 size_t skip_bytes;
2986 int ret;
2988 /* Cover the entire cluster so no additional backing file I/O is required
2989 * when allocating a cluster in the image file.
2990 */
2991 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2992 &cluster_sector_num, &cluster_nb_sectors);
2994 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2995 cluster_sector_num, cluster_nb_sectors);
2997 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2998 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
2999 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
3001 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
3002 &bounce_qiov);
3003 if (ret < 0) {
3004 goto err;
3007 if (drv->bdrv_co_write_zeroes &&
3008 buffer_is_zero(bounce_buffer, iov.iov_len)) {
3009 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
3010 cluster_nb_sectors, 0);
3011 } else {
3012 /* This does not change the data on the disk, so it is not necessary
3013 * to flush even in cache=writethrough mode.
3014 */
3015 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
3016 &bounce_qiov);
3019 if (ret < 0) {
3020 /* It might be okay to ignore write errors for guest requests. If this
3021 * is a deliberate copy-on-read then we don't want to ignore the error.
3022 * Simply report it in all cases.
3024 goto err;
3027 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
3028 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3029 nb_sectors * BDRV_SECTOR_SIZE);
3031 err:
3032 qemu_vfree(bounce_buffer);
3033 return ret;
3037 * Forwards an already correctly aligned request to the BlockDriver. This
3038 * handles copy on read and zeroing after EOF; any other features must be
3039 * implemented by the caller.
3041 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3042 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3043 int64_t align, QEMUIOVector *qiov, int flags)
3045 BlockDriver *drv = bs->drv;
3046 int ret;
3048 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3049 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3051 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3052 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3054 /* Handle Copy on Read and associated serialisation */
3055 if (flags & BDRV_REQ_COPY_ON_READ) {
3056 /* If we touch the same cluster it counts as an overlap. This
3057 * guarantees that allocating writes will be serialized and not race
3058 * with each other for the same cluster. For example, in copy-on-read
3059 * it ensures that the CoR read and write operations are atomic and
3060 * guest writes cannot interleave between them. */
3061 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3064 wait_serialising_requests(req);
3066 if (flags & BDRV_REQ_COPY_ON_READ) {
3067 int pnum;
3069 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3070 if (ret < 0) {
3071 goto out;
3074 if (!ret || pnum != nb_sectors) {
3075 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3076 goto out;
3080 /* Forward the request to the BlockDriver */
3081 if (!(bs->zero_beyond_eof && bs->growable)) {
3082 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3083 } else {
3084 /* Read zeros after EOF of growable BDSes */
3085 int64_t len, total_sectors, max_nb_sectors;
3087 len = bdrv_getlength(bs);
3088 if (len < 0) {
3089 ret = len;
3090 goto out;
3093 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
3094 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3095 align >> BDRV_SECTOR_BITS);
3096 if (max_nb_sectors > 0) {
3097 ret = drv->bdrv_co_readv(bs, sector_num,
3098 MIN(nb_sectors, max_nb_sectors), qiov);
3099 } else {
3100 ret = 0;
3103 /* Reading beyond end of file is supposed to produce zeroes */
3104 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3105 uint64_t offset = MAX(0, total_sectors - sector_num);
3106 uint64_t bytes = (sector_num + nb_sectors - offset) *
3107 BDRV_SECTOR_SIZE;
3108 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3112 out:
3113 return ret;
3117 * Handle a read request in coroutine context
3119 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3120 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3121 BdrvRequestFlags flags)
3123 BlockDriver *drv = bs->drv;
3124 BdrvTrackedRequest req;
3126 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3127 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3128 uint8_t *head_buf = NULL;
3129 uint8_t *tail_buf = NULL;
3130 QEMUIOVector local_qiov;
3131 bool use_local_qiov = false;
3132 int ret;
3134 if (!drv) {
3135 return -ENOMEDIUM;
3137 if (bdrv_check_byte_request(bs, offset, bytes)) {
3138 return -EIO;
3141 if (bs->copy_on_read) {
3142 flags |= BDRV_REQ_COPY_ON_READ;
3145 /* throttling disk I/O */
3146 if (bs->io_limits_enabled) {
3147 bdrv_io_limits_intercept(bs, bytes, false);
3150 /* Align read if necessary by padding qiov */
3151 if (offset & (align - 1)) {
3152 head_buf = qemu_blockalign(bs, align);
3153 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3154 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3155 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3156 use_local_qiov = true;
3158 bytes += offset & (align - 1);
3159 offset = offset & ~(align - 1);
3162 if ((offset + bytes) & (align - 1)) {
3163 if (!use_local_qiov) {
3164 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3165 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3166 use_local_qiov = true;
3168 tail_buf = qemu_blockalign(bs, align);
3169 qemu_iovec_add(&local_qiov, tail_buf,
3170 align - ((offset + bytes) & (align - 1)));
3172 bytes = ROUND_UP(bytes, align);
3175 tracked_request_begin(&req, bs, offset, bytes, false);
3176 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3177 use_local_qiov ? &local_qiov : qiov,
3178 flags);
3179 tracked_request_end(&req);
3181 if (use_local_qiov) {
3182 qemu_iovec_destroy(&local_qiov);
3183 qemu_vfree(head_buf);
3184 qemu_vfree(tail_buf);
3187 return ret;
3190 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3191 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3192 BdrvRequestFlags flags)
3194 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3195 return -EINVAL;
3198 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3199 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3202 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3203 int nb_sectors, QEMUIOVector *qiov)
3205 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3207 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3210 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3211 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3213 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3215 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3216 BDRV_REQ_COPY_ON_READ);
3219 /* If no limit is specified in the BlockLimits, use a default
3220 * of 32768 512-byte sectors (16 MiB) per request.
3221 */
3222 #define MAX_WRITE_ZEROES_DEFAULT 32768
3224 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3225 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3227 BlockDriver *drv = bs->drv;
3228 QEMUIOVector qiov;
3229 struct iovec iov = {0};
3230 int ret = 0;
3232 int max_write_zeroes = bs->bl.max_write_zeroes ?
3233 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3235 while (nb_sectors > 0 && !ret) {
3236 int num = nb_sectors;
3238 /* Align request. Block drivers can expect the "bulk" of the request
3239 * to be aligned.
3241 if (bs->bl.write_zeroes_alignment
3242 && num > bs->bl.write_zeroes_alignment) {
3243 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3244 /* Make a small request up to the first aligned sector. */
3245 num = bs->bl.write_zeroes_alignment;
3246 num -= sector_num % bs->bl.write_zeroes_alignment;
3247 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3248 /* Shorten the request to the last aligned sector. num cannot
3249 * underflow because num > bs->bl.write_zeroes_alignment.
3251 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3255 /* limit request size */
3256 if (num > max_write_zeroes) {
3257 num = max_write_zeroes;
3260 ret = -ENOTSUP;
3261 /* First try the efficient write zeroes operation */
3262 if (drv->bdrv_co_write_zeroes) {
3263 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3266 if (ret == -ENOTSUP) {
3267 /* Fall back to bounce buffer if write zeroes is unsupported */
3268 iov.iov_len = num * BDRV_SECTOR_SIZE;
3269 if (iov.iov_base == NULL) {
3270 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3271 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3273 qemu_iovec_init_external(&qiov, &iov, 1);
3275 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3277 /* Keep the bounce buffer around if it is big enough for all
3278 * future requests.
3279 */
3280 if (num < max_write_zeroes) {
3281 qemu_vfree(iov.iov_base);
3282 iov.iov_base = NULL;
3286 sector_num += num;
3287 nb_sectors -= num;
3290 qemu_vfree(iov.iov_base);
3291 return ret;
3295 * Forwards an already correctly aligned write request to the BlockDriver.
3297 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3298 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3299 QEMUIOVector *qiov, int flags)
3301 BlockDriver *drv = bs->drv;
3302 bool waited;
3303 int ret;
3305 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3306 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3308 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3309 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3311 waited = wait_serialising_requests(req);
3312 assert(!waited || !req->serialising);
3313 assert(req->overlap_offset <= offset);
3314 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3316 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3318 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3319 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3320 qemu_iovec_is_zero(qiov)) {
3321 flags |= BDRV_REQ_ZERO_WRITE;
3322 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3323 flags |= BDRV_REQ_MAY_UNMAP;
3327 if (ret < 0) {
3328 /* Do nothing, write notifier decided to fail this request */
3329 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3330 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3331 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3332 } else {
3333 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3334 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3336 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3338 if (ret == 0 && !bs->enable_write_cache) {
3339 ret = bdrv_co_flush(bs);
3342 bdrv_set_dirty(bs, sector_num, nb_sectors);
3344 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3345 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3347 if (bs->growable && ret >= 0) {
3348 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3351 return ret;
3355 * Handle a write request in coroutine context
3357 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3358 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3359 BdrvRequestFlags flags)
3361 BdrvTrackedRequest req;
3362 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3363 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3364 uint8_t *head_buf = NULL;
3365 uint8_t *tail_buf = NULL;
3366 QEMUIOVector local_qiov;
3367 bool use_local_qiov = false;
3368 int ret;
3370 if (!bs->drv) {
3371 return -ENOMEDIUM;
3373 if (bs->read_only) {
3374 return -EACCES;
3376 if (bdrv_check_byte_request(bs, offset, bytes)) {
3377 return -EIO;
3380 /* throttling disk I/O */
3381 if (bs->io_limits_enabled) {
3382 bdrv_io_limits_intercept(bs, bytes, true);
3386 * Align write if necessary by performing a read-modify-write cycle.
3387 * Pad qiov with the read parts and be sure to have a tracked request not
3388 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3390 tracked_request_begin(&req, bs, offset, bytes, true);
3392 if (offset & (align - 1)) {
3393 QEMUIOVector head_qiov;
3394 struct iovec head_iov;
3396 mark_request_serialising(&req, align);
3397 wait_serialising_requests(&req);
3399 head_buf = qemu_blockalign(bs, align);
3400 head_iov = (struct iovec) {
3401 .iov_base = head_buf,
3402 .iov_len = align,
3404 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3406 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3407 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3408 align, &head_qiov, 0);
3409 if (ret < 0) {
3410 goto fail;
3412 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3414 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3415 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3416 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3417 use_local_qiov = true;
3419 bytes += offset & (align - 1);
3420 offset = offset & ~(align - 1);
3423 if ((offset + bytes) & (align - 1)) {
3424 QEMUIOVector tail_qiov;
3425 struct iovec tail_iov;
3426 size_t tail_bytes;
3427 bool waited;
3429 mark_request_serialising(&req, align);
3430 waited = wait_serialising_requests(&req);
3431 assert(!waited || !use_local_qiov);
3433 tail_buf = qemu_blockalign(bs, align);
3434 tail_iov = (struct iovec) {
3435 .iov_base = tail_buf,
3436 .iov_len = align,
3438 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3440 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3441 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3442 align, &tail_qiov, 0);
3443 if (ret < 0) {
3444 goto fail;
3446 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3448 if (!use_local_qiov) {
3449 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3450 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3451 use_local_qiov = true;
3454 tail_bytes = (offset + bytes) & (align - 1);
3455 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3457 bytes = ROUND_UP(bytes, align);
3460 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3461 use_local_qiov ? &local_qiov : qiov,
3462 flags);
3464 fail:
3465 tracked_request_end(&req);
3467 if (use_local_qiov) {
3468 qemu_iovec_destroy(&local_qiov);
3470 qemu_vfree(head_buf);
3471 qemu_vfree(tail_buf);
3473 return ret;
3476 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3477 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3478 BdrvRequestFlags flags)
3480 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3481 return -EINVAL;
3484 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3485 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3488 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3489 int nb_sectors, QEMUIOVector *qiov)
3491 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3493 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3496 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3497 int64_t sector_num, int nb_sectors,
3498 BdrvRequestFlags flags)
3500 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3502 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3503 flags &= ~BDRV_REQ_MAY_UNMAP;
3506 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3507 BDRV_REQ_ZERO_WRITE | flags);
3511 * Truncate file to 'offset' bytes (needed only for file protocols)
3513 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3515 BlockDriver *drv = bs->drv;
3516 int ret;
3517 if (!drv)
3518 return -ENOMEDIUM;
3519 if (!drv->bdrv_truncate)
3520 return -ENOTSUP;
3521 if (bs->read_only)
3522 return -EACCES;
3523 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
3524 return -EBUSY;
3526 ret = drv->bdrv_truncate(bs, offset);
3527 if (ret == 0) {
3528 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3529 bdrv_dev_resize_cb(bs);
3531 return ret;
3535 * Length of an allocated file in bytes. Sparse files are counted by actual
3536 * allocated space. Return < 0 if error or unknown.
3537 */
3538 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3540 BlockDriver *drv = bs->drv;
3541 if (!drv) {
3542 return -ENOMEDIUM;
3544 if (drv->bdrv_get_allocated_file_size) {
3545 return drv->bdrv_get_allocated_file_size(bs);
3547 if (bs->file) {
3548 return bdrv_get_allocated_file_size(bs->file);
3550 return -ENOTSUP;
3554 * Length of a file in bytes. Return < 0 if error or unknown.
3556 int64_t bdrv_getlength(BlockDriverState *bs)
3558 BlockDriver *drv = bs->drv;
3559 if (!drv)
3560 return -ENOMEDIUM;
3562 if (drv->has_variable_length) {
3563 int ret = refresh_total_sectors(bs, bs->total_sectors);
3564 if (ret < 0) {
3565 return ret;
3568 return bs->total_sectors * BDRV_SECTOR_SIZE;
3571 /* return 0 as number of sectors if no device present or error */
3572 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3574 int64_t length;
3575 length = bdrv_getlength(bs);
3576 if (length < 0)
3577 length = 0;
3578 else
3579 length = length >> BDRV_SECTOR_BITS;
3580 *nb_sectors_ptr = length;
3583 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3584 BlockdevOnError on_write_error)
3586 bs->on_read_error = on_read_error;
3587 bs->on_write_error = on_write_error;
3590 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3592 return is_read ? bs->on_read_error : bs->on_write_error;
3595 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3597 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3599 switch (on_err) {
3600 case BLOCKDEV_ON_ERROR_ENOSPC:
3601 return (error == ENOSPC) ?
3602 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
3603 case BLOCKDEV_ON_ERROR_STOP:
3604 return BLOCK_ERROR_ACTION_STOP;
3605 case BLOCKDEV_ON_ERROR_REPORT:
3606 return BLOCK_ERROR_ACTION_REPORT;
3607 case BLOCKDEV_ON_ERROR_IGNORE:
3608 return BLOCK_ERROR_ACTION_IGNORE;
3609 default:
3610 abort();
3614 /* This is done by device models because, while the block layer knows
3615 * about the error, it does not know whether an operation comes from
3616 * the device or the block layer (from a job, for example).
3618 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3619 bool is_read, int error)
3621 assert(error >= 0);
3623 if (action == BLOCK_ERROR_ACTION_STOP) {
3624 /* First set the iostatus, so that "info block" returns an iostatus
3625 * that matches the events raised so far (an additional error iostatus
3626 * is fine, but not a lost one).
3628 bdrv_iostatus_set_err(bs, error);
3630 /* Then raise the request to stop the VM and the event.
3631 * qemu_system_vmstop_request_prepare has two effects. First,
3632 * it ensures that the STOP event always comes after the
3633 * BLOCK_IO_ERROR event. Second, it ensures that even if management
3634 * can observe the STOP event and do a "cont" before the STOP
3635 * event is issued, the VM will not stop. In this case, vm_start()
3636 * also ensures that the STOP/RESUME pair of events is emitted.
3638 qemu_system_vmstop_request_prepare();
3639 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
3640 qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
3641 } else {
3642 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
3646 int bdrv_is_read_only(BlockDriverState *bs)
3648 return bs->read_only;
3651 int bdrv_is_sg(BlockDriverState *bs)
3653 return bs->sg;
3656 int bdrv_enable_write_cache(BlockDriverState *bs)
3658 return bs->enable_write_cache;
3661 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3663 bs->enable_write_cache = wce;
3665 /* so a reopen() will preserve wce */
3666 if (wce) {
3667 bs->open_flags |= BDRV_O_CACHE_WB;
3668 } else {
3669 bs->open_flags &= ~BDRV_O_CACHE_WB;
3673 int bdrv_is_encrypted(BlockDriverState *bs)
3675 if (bs->backing_hd && bs->backing_hd->encrypted)
3676 return 1;
3677 return bs->encrypted;
3680 int bdrv_key_required(BlockDriverState *bs)
3682 BlockDriverState *backing_hd = bs->backing_hd;
3684 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3685 return 1;
3686 return (bs->encrypted && !bs->valid_key);
3689 int bdrv_set_key(BlockDriverState *bs, const char *key)
3691 int ret;
3692 if (bs->backing_hd && bs->backing_hd->encrypted) {
3693 ret = bdrv_set_key(bs->backing_hd, key);
3694 if (ret < 0)
3695 return ret;
3696 if (!bs->encrypted)
3697 return 0;
3699 if (!bs->encrypted) {
3700 return -EINVAL;
3701 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3702 return -ENOMEDIUM;
3704 ret = bs->drv->bdrv_set_key(bs, key);
3705 if (ret < 0) {
3706 bs->valid_key = 0;
3707 } else if (!bs->valid_key) {
3708 bs->valid_key = 1;
3709 /* call the change callback now, we skipped it on open */
3710 bdrv_dev_change_media_cb(bs, true);
3712 return ret;
3715 const char *bdrv_get_format_name(BlockDriverState *bs)
3717 return bs->drv ? bs->drv->format_name : NULL;
3720 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3721 void *opaque)
3723 BlockDriver *drv;
3724 int count = 0;
3725 const char **formats = NULL;
3727 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3728 if (drv->format_name) {
3729 bool found = false;
3730 int i = count;
3731 while (formats && i && !found) {
3732 found = !strcmp(formats[--i], drv->format_name);
3735 if (!found) {
3736 formats = g_realloc(formats, (count + 1) * sizeof(char *));
3737 formats[count++] = drv->format_name;
3738 it(opaque, drv->format_name);
3742 g_free(formats);
3745 /* Find the BlockDriverState of a block backend by its device name */
3746 BlockDriverState *bdrv_find(const char *name)
3748 BlockDriverState *bs;
3750 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3751 if (!strcmp(name, bs->device_name)) {
3752 return bs;
3755 return NULL;
3758 /* Find a named node in the graph of BlockDriverStates */
3759 BlockDriverState *bdrv_find_node(const char *node_name)
3761 BlockDriverState *bs;
3763 assert(node_name);
3765 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3766 if (!strcmp(node_name, bs->node_name)) {
3767 return bs;
3770 return NULL;
3773 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3774 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3776 BlockDeviceInfoList *list, *entry;
3777 BlockDriverState *bs;
3779 list = NULL;
3780 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3781 entry = g_malloc0(sizeof(*entry));
3782 entry->value = bdrv_block_device_info(bs);
3783 entry->next = list;
3784 list = entry;
3787 return list;
3790 BlockDriverState *bdrv_lookup_bs(const char *device,
3791 const char *node_name,
3792 Error **errp)
3794 BlockDriverState *bs = NULL;
3796 if (device) {
3797 bs = bdrv_find(device);
3799 if (bs) {
3800 return bs;
3804 if (node_name) {
3805 bs = bdrv_find_node(node_name);
3807 if (bs) {
3808 return bs;
3812 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3813 device ? device : "",
3814 node_name ? node_name : "");
3815 return NULL;
3818 BlockDriverState *bdrv_next(BlockDriverState *bs)
3820 if (!bs) {
3821 return QTAILQ_FIRST(&bdrv_states);
3823 return QTAILQ_NEXT(bs, device_list);
3826 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3828 BlockDriverState *bs;
3830 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3831 it(opaque, bs);
3835 const char *bdrv_get_device_name(BlockDriverState *bs)
3837 return bs->device_name;
3840 int bdrv_get_flags(BlockDriverState *bs)
3842 return bs->open_flags;
3845 int bdrv_flush_all(void)
3847 BlockDriverState *bs;
3848 int result = 0;
3850 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3851 AioContext *aio_context = bdrv_get_aio_context(bs);
3852 int ret;
3854 aio_context_acquire(aio_context);
3855 ret = bdrv_flush(bs);
3856 if (ret < 0 && !result) {
3857 result = ret;
3859 aio_context_release(aio_context);
3862 return result;
3865 int bdrv_has_zero_init_1(BlockDriverState *bs)
3867 return 1;
3870 int bdrv_has_zero_init(BlockDriverState *bs)
3872 assert(bs->drv);
3874 /* If BS is a copy-on-write image, it is initialized to
3875 the contents of the base image, which may not be zeroes. */
3876 if (bs->backing_hd) {
3877 return 0;
3879 if (bs->drv->bdrv_has_zero_init) {
3880 return bs->drv->bdrv_has_zero_init(bs);
3883 /* safe default */
3884 return 0;
3887 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3889 BlockDriverInfo bdi;
3891 if (bs->backing_hd) {
3892 return false;
3895 if (bdrv_get_info(bs, &bdi) == 0) {
3896 return bdi.unallocated_blocks_are_zero;
3899 return false;
3902 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3904 BlockDriverInfo bdi;
3906 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3907 return false;
3910 if (bdrv_get_info(bs, &bdi) == 0) {
3911 return bdi.can_write_zeroes_with_unmap;
3914 return false;
3917 typedef struct BdrvCoGetBlockStatusData {
3918 BlockDriverState *bs;
3919 BlockDriverState *base;
3920 int64_t sector_num;
3921 int nb_sectors;
3922 int *pnum;
3923 int64_t ret;
3924 bool done;
3925 } BdrvCoGetBlockStatusData;
3928 * Returns true iff the specified sector is present in the disk image. Drivers
3929 * not implementing the functionality are assumed to not support backing files,
3930 * hence all their sectors are reported as allocated.
3932 * If 'sector_num' is beyond the end of the disk image the return value is 0
3933 * and 'pnum' is set to 0.
3935 * 'pnum' is set to the number of sectors (including and immediately following
3936 * the specified sector) that are known to be in the same
3937 * allocated/unallocated state.
3939 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3940 * beyond the end of the disk image it will be clamped.
3942 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3943 int64_t sector_num,
3944 int nb_sectors, int *pnum)
3946 int64_t length;
3947 int64_t n;
3948 int64_t ret, ret2;
3950 length = bdrv_getlength(bs);
3951 if (length < 0) {
3952 return length;
3955 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
3956 *pnum = 0;
3957 return 0;
3960 n = bs->total_sectors - sector_num;
3961 if (n < nb_sectors) {
3962 nb_sectors = n;
3965 if (!bs->drv->bdrv_co_get_block_status) {
3966 *pnum = nb_sectors;
3967 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
3968 if (bs->drv->protocol_name) {
3969 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3971 return ret;
3974 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3975 if (ret < 0) {
3976 *pnum = 0;
3977 return ret;
3980 if (ret & BDRV_BLOCK_RAW) {
3981 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3982 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3983 *pnum, pnum);
3986 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
3987 ret |= BDRV_BLOCK_ALLOCATED;
3990 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3991 if (bdrv_unallocated_blocks_are_zero(bs)) {
3992 ret |= BDRV_BLOCK_ZERO;
3993 } else if (bs->backing_hd) {
3994 BlockDriverState *bs2 = bs->backing_hd;
3995 int64_t length2 = bdrv_getlength(bs2);
3996 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3997 ret |= BDRV_BLOCK_ZERO;
4002 if (bs->file &&
4003 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
4004 (ret & BDRV_BLOCK_OFFSET_VALID)) {
4005 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4006 *pnum, pnum);
4007 if (ret2 >= 0) {
4008 /* Ignore errors. This is just providing extra information; it
4009 * is useful but not necessary.
4010 */
4011 ret |= (ret2 & BDRV_BLOCK_ZERO);
4015 return ret;
4018 /* Coroutine wrapper for bdrv_get_block_status() */
4019 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4021 BdrvCoGetBlockStatusData *data = opaque;
4022 BlockDriverState *bs = data->bs;
4024 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4025 data->pnum);
4026 data->done = true;
4030 * Synchronous wrapper around bdrv_co_get_block_status().
4032 * See bdrv_co_get_block_status() for details.
4034 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4035 int nb_sectors, int *pnum)
4037 Coroutine *co;
4038 BdrvCoGetBlockStatusData data = {
4039 .bs = bs,
4040 .sector_num = sector_num,
4041 .nb_sectors = nb_sectors,
4042 .pnum = pnum,
4043 .done = false,
4046 if (qemu_in_coroutine()) {
4047 /* Fast-path if already in coroutine context */
4048 bdrv_get_block_status_co_entry(&data);
4049 } else {
4050 AioContext *aio_context = bdrv_get_aio_context(bs);
4052 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4053 qemu_coroutine_enter(co, &data);
4054 while (!data.done) {
4055 aio_poll(aio_context, true);
4058 return data.ret;
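/* Illustrative usage sketch (assumed, not from the original file): decode
 * the status bits for one stretch of sectors:
 *
 *     int pnum;
 *     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &pnum);
 *     if (ret >= 0 && (ret & BDRV_BLOCK_ZERO)) {
 *         ... the next pnum sectors read as zeroes ...
 *     }
 */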
4061 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4062 int nb_sectors, int *pnum)
4064 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4065 if (ret < 0) {
4066 return ret;
4068 return (ret & BDRV_BLOCK_ALLOCATED);
4072 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4074 * Return true if the given sector is allocated in any image between
4075 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4076 * sector is allocated in any image of the chain. Return false otherwise.
4078 * 'pnum' is set to the number of sectors (including and immediately following
4079 * the specified sector) that are known to be in the same
4080 * allocated/unallocated state.
4083 int bdrv_is_allocated_above(BlockDriverState *top,
4084 BlockDriverState *base,
4085 int64_t sector_num,
4086 int nb_sectors, int *pnum)
4088 BlockDriverState *intermediate;
4089 int ret, n = nb_sectors;
4091 intermediate = top;
4092 while (intermediate && intermediate != base) {
4093 int pnum_inter;
4094 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4095 &pnum_inter);
4096 if (ret < 0) {
4097 return ret;
4098 } else if (ret) {
4099 *pnum = pnum_inter;
4100 return 1;
4104 * [sector_num, nb_sectors] is unallocated on top but intermediate
4105 * might have [sector_num+x, nb_sectors] allocated.
4108 */
4109 if (n > pnum_inter &&
4110 (intermediate == top ||
4111 sector_num + pnum_inter < intermediate->total_sectors)) {
4112 n = pnum_inter;
4115 intermediate = intermediate->backing_hd;
4118 *pnum = n;
4119 return 0;
4122 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4124 if (bs->backing_hd && bs->backing_hd->encrypted)
4125 return bs->backing_file;
4126 else if (bs->encrypted)
4127 return bs->filename;
4128 else
4129 return NULL;
4132 void bdrv_get_backing_filename(BlockDriverState *bs,
4133 char *filename, int filename_size)
4135 pstrcpy(filename, filename_size, bs->backing_file);
4138 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4139 const uint8_t *buf, int nb_sectors)
4141 BlockDriver *drv = bs->drv;
4142 if (!drv)
4143 return -ENOMEDIUM;
4144 if (!drv->bdrv_write_compressed)
4145 return -ENOTSUP;
4146 if (bdrv_check_request(bs, sector_num, nb_sectors))
4147 return -EIO;
4149 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4151 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4154 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4156 BlockDriver *drv = bs->drv;
4157 if (!drv)
4158 return -ENOMEDIUM;
4159 if (!drv->bdrv_get_info)
4160 return -ENOTSUP;
4161 memset(bdi, 0, sizeof(*bdi));
4162 return drv->bdrv_get_info(bs, bdi);
4165 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4167 BlockDriver *drv = bs->drv;
4168 if (drv && drv->bdrv_get_specific_info) {
4169 return drv->bdrv_get_specific_info(bs);
4171 return NULL;
4174 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4175 int64_t pos, int size)
4177 QEMUIOVector qiov;
4178 struct iovec iov = {
4179 .iov_base = (void *) buf,
4180 .iov_len = size,
4183 qemu_iovec_init_external(&qiov, &iov, 1);
4184 return bdrv_writev_vmstate(bs, &qiov, pos);
4187 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4189 BlockDriver *drv = bs->drv;
4191 if (!drv) {
4192 return -ENOMEDIUM;
4193 } else if (drv->bdrv_save_vmstate) {
4194 return drv->bdrv_save_vmstate(bs, qiov, pos);
4195 } else if (bs->file) {
4196 return bdrv_writev_vmstate(bs->file, qiov, pos);
4199 return -ENOTSUP;
4202 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4203 int64_t pos, int size)
4205 BlockDriver *drv = bs->drv;
4206 if (!drv)
4207 return -ENOMEDIUM;
4208 if (drv->bdrv_load_vmstate)
4209 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4210 if (bs->file)
4211 return bdrv_load_vmstate(bs->file, buf, pos, size);
4212 return -ENOTSUP;
4215 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4217 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4218 return;
4221 bs->drv->bdrv_debug_event(bs, event);
4224 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4225 const char *tag)
4227 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4228 bs = bs->file;
4231 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4232 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4235 return -ENOTSUP;
4238 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4240 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4241 bs = bs->file;
4244 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4245 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4248 return -ENOTSUP;
4251 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4253 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4254 bs = bs->file;
4257 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4258 return bs->drv->bdrv_debug_resume(bs, tag);
4261 return -ENOTSUP;
4264 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4266 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4267 bs = bs->file;
4270 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4271 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4274 return false;
4277 int bdrv_is_snapshot(BlockDriverState *bs)
4279 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the
             * current image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing
             * against is relative to the current image filename (or
             * absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}
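/*
 * A minimal usage sketch (file names are illustrative): given a chain
 * base.qcow2 <- mid.qcow2 <- top.qcow2 opened as bs_top, the lookup
 *
 *     BlockDriverState *backing;
 *     backing = bdrv_find_backing_image(bs_top, "mid.qcow2");
 *
 * returns the BDS of mid.qcow2, provided "mid.qcow2" resolves to the same
 * absolute path relative to the chain; it returns NULL when no element of
 * the backing chain matches.
 */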
int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}
static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
                            int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check whether adjacent requests are sequential or overlapping. If so,
    // combine them into one request.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                            reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
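/*
 * A worked example of the merge above: an 8-sector write at sector 0 and a
 * 4-sector write at sector 8 are exactly sequential (reqs[1].sector ==
 * oldreq_last), so they collapse into a single 12-sector request whose qiov
 * concatenates both vectors.  If the second write instead started at sector
 * 4, the overlapping tail of the first request would be dropped; if it
 * started at sector 9, the gap would prevent any merge.
 */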
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error, this function returns -1, and each individual
 * request may or may not have been submitted yet; the callback will be
 * called for some of the requests but not for others. The caller must check
 * the error field of each BlockRequest to find out which requests failed
 * (if error != 0, no callback will be called for that request).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
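/*
 * Caller sketch (hypothetical names; virtio-blk is the main user of this
 * interface).  Each BlockRequest carries its own completion callback, and
 * the error field reports per-request failure on the -1 path:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_cb, .opaque = req0 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = req1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         ... inspect reqs[i].error; requests with error != 0 get no
 *         callback ...
 *     }
 */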
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}
static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBSync),
    .cancel     = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}
static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)
{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
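/*
 * The helpers above emulate AIO for drivers that only provide synchronous
 * bdrv_read/bdrv_write: the request is executed synchronously through a
 * bounce buffer and completion is merely deferred to a bottom half, so the
 * caller still sees the normal AIO contract (the callback runs outside the
 * submission path).
 */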
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        aio_poll(aio_context, true);
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
    .cancel     = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
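/*
 * This is the inverse emulation: the AIO interface built on top of the
 * coroutine path.  The request runs in its own coroutine (bdrv_co_do_rw)
 * and completion is signalled from a bottom half, which is what makes
 * bdrv_aio_readv()/bdrv_aio_writev() work for drivers that only implement
 * coroutine I/O.
 */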
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
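/*
 * qemu_aio_get()/qemu_aio_release() implement a simple derived-struct
 * allocation scheme: AIOCBInfo records the size of the driver-specific
 * AIOCB, which embeds BlockDriverAIOCB as its first member.  A driver
 * typically does something like this (sketch, hypothetical names):
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     acb->my_state = ...;           // driver-specific fields
 *     return &acb->common;
 */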
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
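/*
 * Flush ordering, spelled out: one bdrv_co_flush() pass over a format
 * driver on top of a protocol driver amounts to
 *
 *     format:   bdrv_co_flush_to_os    (write back internal caches)
 *     format:   bdrv_co_flush_to_disk  (or the bdrv_aio_flush fallback)
 *     protocol: bdrv_co_flush(bs->file), repeating the same steps
 *
 * With cache=unsafe, BDRV_O_NO_FLUSH skips the to-disk step at every layer
 * while data is still written back to the OS.
 */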
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    Error *local_err = NULL;
    int ret;

    if (!bs->drv) {
        return;
    }

    if (bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs, &local_err);
    } else if (bs->file) {
        bdrv_invalidate_cache(bs->file, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        return;
    }
}

void bdrv_invalidate_cache_all(Error **errp)
{
    BlockDriverState *bs;
    Error *local_err = NULL;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_invalidate_cache(bs, &local_err);
        aio_context_release(aio_context);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
        aio_context_release(aio_context);
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
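/*
 * bdrv_flush() is the standard pattern for exposing a synchronous API on
 * top of a coroutine implementation: when already in coroutine context the
 * entry function runs directly; otherwise a coroutine is spawned and the
 * caller spins in aio_poll() until the NOT_DONE sentinel is replaced by the
 * real return value.  bdrv_discard() below has the same shape.
 */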
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard;

    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockDriverAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* Discard only the clamped chunk, not the whole remaining
             * range, so both paths advance in lockstep with the loop
             * below. */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}
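/*
 * Alignment arithmetic, worked through: with bs->bl.discard_alignment ==
 * 2048 sectors and a request of 4096 sectors starting at sector 1000, num
 * is first capped to 2048 and then reduced by 1000 % 2048 == 1000, so the
 * first chunk is 1048 sectors and ends exactly on the alignment boundary;
 * every following chunk starts aligned.
 */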
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return 0;
    }
    if (!drv->bdrv_is_inserted) {
        return 1;
    }
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
                                          eject_flag, &error_abort);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}

void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
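/*
 * Usage sketch: buffers handed to backends opened with O_DIRECT must meet
 * the memory alignment reported by bdrv_opt_mem_align().  A caller that
 * cannot guarantee this allocates through qemu_blockalign():
 *
 *     uint8_t *buf = qemu_blockalign(bs, len);   // aligned for bs
 *     ...
 *     qemu_vfree(buf);
 */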
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
                                          Error **errp)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = bdrv_getlength(bs);
    if (bitmap_size < 0) {
        error_setg_errno(errp, -bitmap_size, "could not get length of device");
        errno = -bitmap_size;
        return NULL;
    }
    bitmap_size >>= BDRV_SECTOR_BITS;
    bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}
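/*
 * Granularity arithmetic, worked through: granularity is a power of two in
 * bytes, no smaller than BDRV_SECTOR_SIZE.  For 64 KiB, 65536 >>
 * BDRV_SECTOR_BITS == 128 sectors per bit, and ffs(128) - 1 == 7 becomes
 * the hbitmap shift, i.e. one bitmap bit tracks 2^7 sectors.
 */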
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;

    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
        BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}

int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
                   int64_t sector)
{
    if (bitmap) {
        return hbitmap_get(bitmap->bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;

    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;

    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, the reference count is zero, the BlockDriverState
 * is deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}

struct BdrvOpBlocker {
    Error *reason;
    QLIST_ENTRY(BdrvOpBlocker) list;
};
bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
{
    BdrvOpBlocker *blocker;

    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
    if (!QLIST_EMPTY(&bs->op_blockers[op])) {
        blocker = QLIST_FIRST(&bs->op_blockers[op]);
        if (errp) {
            error_setg(errp, "Device '%s' is busy: %s",
                       bs->device_name, error_get_pretty(blocker->reason));
        }
        return true;
    }
    return false;
}

void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
{
    BdrvOpBlocker *blocker;

    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);

    blocker = g_malloc0(sizeof(BdrvOpBlocker));
    blocker->reason = reason;
    QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
}

void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
{
    BdrvOpBlocker *blocker, *next;

    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
    QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
        if (blocker->reason == reason) {
            QLIST_REMOVE(blocker, list);
            g_free(blocker);
        }
    }
}

void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
{
    int i;

    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        bdrv_op_block(bs, i, reason);
    }
}

void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
{
    int i;

    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        bdrv_op_unblock(bs, i, reason);
    }
}

bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
{
    int i;

    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        if (!QLIST_EMPTY(&bs->op_blockers[i])) {
            return false;
        }
    }
    return true;
}
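/*
 * Op blocker usage sketch: a long-running user of a BDS (a block job, say)
 * blocks conflicting operations for the duration of its work; the Error
 * object doubles as the key for unblocking:
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "node is in use by a background job");
 *     bdrv_op_block_all(bs, blocker);
 *     ... bdrv_op_is_blocked() now fails for conflicting operations ...
 *     bdrv_op_unblock_all(bs, blocker);
 *     error_free(blocker);
 */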
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
                enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
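/*
 * Accounting usage sketch: device models bracket each guest-visible request
 * with a cookie, usually embedded in their request structure:
 *
 *     BlockAcctCookie cookie;
 *     bdrv_acct_start(bs, &cookie, qiov->size, BDRV_ACCT_WRITE);
 *     ... submit the request; then, in its completion path: ...
 *     bdrv_acct_done(bs, &cookie);
 */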
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QemuOptsList *create_opts = NULL;
    QemuOpts *opts = NULL;
    const char *backing_fmt, *backing_file;
    int64_t size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_opts = qemu_opts_append(create_opts, drv->create_opts);
    create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);

    /* Create parameter list with default values */
    opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        if (qemu_opts_do_parse(opts, options, NULL) != 0) {
            error_setg(errp, "Invalid options for file format '%s'", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
    if (backing_file) {
        if (!strcmp(filename, backing_file)) {
            error_setg(errp, "Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt) {
        backing_drv = bdrv_find_format(backing_fmt);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
    if (size == -1) {
        if (backing_file) {
            BlockDriverState *bs;
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = NULL;
            ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        qemu_opts_print(opts);
        puts("");
    }

    ret = bdrv_create(drv, filename, opts, &local_err);

    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    qemu_opts_del(opts);
    qemu_opts_free(create_opts);
    if (local_err) {
        error_propagate(errp, local_err);
    }
}
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    return bs->aio_context;
}

void bdrv_detach_aio_context(BlockDriverState *bs)
{
    if (!bs->drv) {
        return;
    }

    if (bs->io_limits_enabled) {
        throttle_detach_aio_context(&bs->throttle_state);
    }
    if (bs->drv->bdrv_detach_aio_context) {
        bs->drv->bdrv_detach_aio_context(bs);
    }
    if (bs->file) {
        bdrv_detach_aio_context(bs->file);
    }
    if (bs->backing_hd) {
        bdrv_detach_aio_context(bs->backing_hd);
    }

    bs->aio_context = NULL;
}

void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context)
{
    if (!bs->drv) {
        return;
    }

    bs->aio_context = new_context;

    if (bs->backing_hd) {
        bdrv_attach_aio_context(bs->backing_hd, new_context);
    }
    if (bs->file) {
        bdrv_attach_aio_context(bs->file, new_context);
    }
    if (bs->drv->bdrv_attach_aio_context) {
        bs->drv->bdrv_attach_aio_context(bs, new_context);
    }
    if (bs->io_limits_enabled) {
        throttle_attach_aio_context(&bs->throttle_state, new_context);
    }
}

void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
{
    bdrv_drain_all(); /* ensure there are no in-flight requests */

    bdrv_detach_aio_context(bs);

    /* This function executes in the old AioContext so acquire the new one in
     * case it runs in a different thread.
     */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context);
    aio_context_release(new_context);
}
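/*
 * Note that detach and attach mirror each other: detach notifies the driver
 * before descending into bs->file and bs->backing_hd, while attach sets the
 * new context and walks the children first, notifying the driver last, so a
 * driver callback always runs with its subtree in a consistent state.
 */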
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts)
{
    if (!bs->drv->bdrv_amend_options) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, opts);
}
/* This function will be called by the bdrv_recurse_is_first_non_filter
 * method of block filter drivers and by bdrv_is_first_non_filter.
 * It is used to test whether the given bs is the candidate or to recurse
 * further into the node graph.
 */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    /* return false if basic checks fail */
    if (!bs || !bs->drv) {
        return false;
    }

    /* the code reached a non block filter driver -> check if the bs is
     * the same as the candidate. It's the recursion termination condition.
     */
    if (!bs->drv->is_filter) {
        return bs == candidate;
    }
    /* Down this path the driver is a block filter driver */

    /* If the block filter recursion method is defined use it to recurse down
     * the node graph.
     */
    if (bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    /* the driver is a block filter but does not allow recursion -> return
     * false
     */
    return false;
}

/* This function checks if the candidate is the first non filter bs down its
 * bs chain. Since we don't have pointers to parents it explores all bs
 * chains from the top. Some filters can choose not to pass down the
 * recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        /* try to recurse in this top level bs */
        perm = bdrv_recurse_is_first_non_filter(bs, candidate);

        /* candidate is the first non filter */
        if (perm) {
            return true;
        }
    }

    return false;
}