/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
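/*
 * Illustrative examples (not part of the original source): plain drive
 * letters such as "d:" or "D:" satisfy is_windows_drive(), and device
 * namespace paths such as "\\.\PhysicalDrive0" or "//./PhysicalDrive0"
 * (hypothetical device names) are accepted through the prefix checks above.
 */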
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
/* This function makes an I/O request wait if needed
 *
 * @bytes:    the byte count of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
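/*
 * Usage sketch (illustrative, based on the behaviour above, not a quote of
 * the original callers): a throttled request path first parks itself if a
 * timer is pending or older requests of the same direction are still queued,
 * then accounts its byte count, and finally wakes at most one follower:
 *
 *     if (bs->io_limits_enabled) {
 *         bdrv_io_limits_intercept(bs, bytes, false);   // read request
 *     }
 *
 * The is_write argument selects which of the two throttle queues
 * (throttled_reqs[0] for reads, throttled_reqs[1] for writes) is used.
 */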
size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* If filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
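/*
 * Examples (illustrative, not part of the original file): combining a
 * relative backing file name with its base image path keeps the directory
 * and, for URLs, the protocol prefix:
 *
 *     path_combine(buf, sizeof(buf), "/vm/base.qcow2", "snap.qcow2");
 *         -> "/vm/snap.qcow2"
 *     path_combine(buf, sizeof(buf), "http://host/dir/base.img", "o.img");
 *         -> "http://host/dir/o.img"
 *     path_combine(buf, sizeof(buf), "/vm/base.qcow2", "/abs/other.img");
 *         -> "/abs/other.img"   (absolute names are copied verbatim)
 */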
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
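/*
 * Registration sketch (illustrative; the driver names below are hypothetical):
 * format and protocol drivers normally register themselves from a module
 * constructor:
 *
 *     static BlockDriver bdrv_mydrv = {
 *         .format_name     = "mydrv",
 *         .instance_size   = sizeof(BDRVMyDrvState),
 *         .bdrv_open       = mydrv_open,
 *         .bdrv_aio_readv  = mydrv_aio_readv,
 *         .bdrv_aio_writev = mydrv_aio_writev,
 *     };
 *
 *     static void bdrv_mydrv_init(void)
 *     {
 *         bdrv_register(&bdrv_mydrv);
 *     }
 *     block_init(bdrv_mydrv_init);
 *
 * A driver that only provides bdrv_aio_readv/writev still gets working
 * coroutine entry points, because bdrv_register() fills in the emulation
 * wrappers above.
 */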
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;
    int i;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
424 typedef struct CreateCo {
425 BlockDriver *drv;
426 char *filename;
427 QemuOpts *opts;
428 int ret;
429 Error *err;
430 } CreateCo;
432 static void coroutine_fn bdrv_create_co_entry(void *opaque)
434 Error *local_err = NULL;
435 int ret;
437 CreateCo *cco = opaque;
438 assert(cco->drv);
440 ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
441 if (local_err) {
442 error_propagate(&cco->err, local_err);
444 cco->ret = ret;
447 int bdrv_create(BlockDriver *drv, const char* filename,
448 QemuOpts *opts, Error **errp)
450 int ret;
452 Coroutine *co;
453 CreateCo cco = {
454 .drv = drv,
455 .filename = g_strdup(filename),
456 .opts = opts,
457 .ret = NOT_DONE,
458 .err = NULL,
461 if (!drv->bdrv_create) {
462 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
463 ret = -ENOTSUP;
464 goto out;
467 if (qemu_in_coroutine()) {
468 /* Fast-path if already in coroutine context */
469 bdrv_create_co_entry(&cco);
470 } else {
471 co = qemu_coroutine_create(bdrv_create_co_entry);
472 qemu_coroutine_enter(co, &cco);
473 while (cco.ret == NOT_DONE) {
474 qemu_aio_wait();
478 ret = cco.ret;
479 if (ret < 0) {
480 if (cco.err) {
481 error_propagate(errp, cco.err);
482 } else {
483 error_setg_errno(errp, -ret, "Could not create image");
487 out:
488 g_free(cco.filename);
489 return ret;
492 int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
494 BlockDriver *drv;
495 Error *local_err = NULL;
496 int ret;
498 drv = bdrv_find_protocol(filename, true);
499 if (drv == NULL) {
500 error_setg(errp, "Could not find protocol for file '%s'", filename);
501 return -ENOENT;
504 ret = bdrv_create(drv, filename, opts, &local_err);
505 if (local_err) {
506 error_propagate(errp, local_err);
508 return ret;
511 int bdrv_refresh_limits(BlockDriverState *bs)
513 BlockDriver *drv = bs->drv;
515 memset(&bs->bl, 0, sizeof(bs->bl));
517 if (!drv) {
518 return 0;
521 /* Take some limits from the children as a default */
522 if (bs->file) {
523 bdrv_refresh_limits(bs->file);
524 bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
525 bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
526 } else {
527 bs->bl.opt_mem_alignment = 512;
530 if (bs->backing_hd) {
531 bdrv_refresh_limits(bs->backing_hd);
532 bs->bl.opt_transfer_length =
533 MAX(bs->bl.opt_transfer_length,
534 bs->backing_hd->bl.opt_transfer_length);
535 bs->bl.opt_mem_alignment =
536 MAX(bs->bl.opt_mem_alignment,
537 bs->backing_hd->bl.opt_mem_alignment);
540 /* Then let the driver override it */
541 if (drv->bdrv_refresh_limits) {
542 return drv->bdrv_refresh_limits(bs);
545 return 0;
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
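/*
 * Usage sketch (illustrative): callers pass in their own buffer and treat
 * the result as the name of an already-created empty file, as the temporary
 * snapshot code later in this file does:
 *
 *     char *tmp = g_malloc0(PATH_MAX + 1);
 *     int ret = get_tmp_filename(tmp, PATH_MAX + 1);
 *     if (ret < 0) {
 *         ... report -ret as an errno value ...
 *     }
 *
 * On POSIX hosts the name has the form "$TMPDIR/vl.XXXXXX" with the X's
 * filled in by mkstemp(), falling back to /var/tmp when TMPDIR is unset.
 */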
585 * Detect host devices. By convention, /dev/cdrom[N] is always
586 * recognized as a host CDROM.
588 static BlockDriver *find_hdev_driver(const char *filename)
590 int score_max = 0, score;
591 BlockDriver *drv = NULL, *d;
593 QLIST_FOREACH(d, &bdrv_drivers, list) {
594 if (d->bdrv_probe_device) {
595 score = d->bdrv_probe_device(filename);
596 if (score > score_max) {
597 score_max = score;
598 drv = d;
603 return drv;
606 BlockDriver *bdrv_find_protocol(const char *filename,
607 bool allow_protocol_prefix)
609 BlockDriver *drv1;
610 char protocol[128];
611 int len;
612 const char *p;
614 /* TODO Drivers without bdrv_file_open must be specified explicitly */
617 * XXX(hch): we really should not let host device detection
618 * override an explicit protocol specification, but moving this
619 * later breaks access to device names with colons in them.
620 * Thanks to the brain-dead persistent naming schemes on udev-
621 * based Linux systems those actually are quite common.
623 drv1 = find_hdev_driver(filename);
624 if (drv1) {
625 return drv1;
628 if (!path_has_protocol(filename) || !allow_protocol_prefix) {
629 return bdrv_find_format("file");
632 p = strchr(filename, ':');
633 assert(p != NULL);
634 len = p - filename;
635 if (len > sizeof(protocol) - 1)
636 len = sizeof(protocol) - 1;
637 memcpy(protocol, filename, len);
638 protocol[len] = '\0';
639 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
640 if (drv1->protocol_name &&
641 !strcmp(drv1->protocol_name, protocol)) {
642 return drv1;
645 return NULL;
648 static int find_image_format(BlockDriverState *bs, const char *filename,
649 BlockDriver **pdrv, Error **errp)
651 int score, score_max;
652 BlockDriver *drv1, *drv;
653 uint8_t buf[2048];
654 int ret = 0;
656 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
657 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
658 drv = bdrv_find_format("raw");
659 if (!drv) {
660 error_setg(errp, "Could not find raw image format");
661 ret = -ENOENT;
663 *pdrv = drv;
664 return ret;
667 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
668 if (ret < 0) {
669 error_setg_errno(errp, -ret, "Could not read image for determining its "
670 "format");
671 *pdrv = NULL;
672 return ret;
675 score_max = 0;
676 drv = NULL;
677 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
678 if (drv1->bdrv_probe) {
679 score = drv1->bdrv_probe(buf, ret, filename);
680 if (score > score_max) {
681 score_max = score;
682 drv = drv1;
686 if (!drv) {
687 error_setg(errp, "Could not determine image format: No compatible "
688 "driver found");
689 ret = -ENOENT;
691 *pdrv = drv;
692 return ret;
696 * Set the current 'total_sectors' value
698 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
700 BlockDriver *drv = bs->drv;
702 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
703 if (bs->sg)
704 return 0;
706 /* query actual device if possible, otherwise just trust the hint */
707 if (drv->bdrv_getlength) {
708 int64_t length = drv->bdrv_getlength(bs);
709 if (length < 0) {
710 return length;
712 hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
715 bs->total_sectors = hint;
716 return 0;
/*
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/*
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
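/*
 * Summary of the mapping above (derived from the code in this file, added
 * for reference):
 *
 *   mode          BDRV_O_NOCACHE  BDRV_O_CACHE_WB  BDRV_O_NO_FLUSH
 *   writethrough        -                -                -
 *   writeback           -                x                -
 *   none / off          x                x                -
 *   directsync          x                -                -
 *   unsafe              -                x                x
 */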
767 * The copy-on-read flag is actually a reference count so multiple users may
768 * use the feature without worrying about clobbering its previous state.
769 * Copy-on-read stays enabled until all users have called to disable it.
771 void bdrv_enable_copy_on_read(BlockDriverState *bs)
773 bs->copy_on_read++;
776 void bdrv_disable_copy_on_read(BlockDriverState *bs)
778 assert(bs->copy_on_read > 0);
779 bs->copy_on_read--;
783 * Returns the flags that a temporary snapshot should get, based on the
784 * originally requested flags (the originally requested image will have flags
785 * like a backing file)
787 static int bdrv_temp_snapshot_flags(int flags)
789 return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
793 * Returns the flags that bs->file should get, based on the given flags for
794 * the parent BDS
796 static int bdrv_inherited_flags(int flags)
798 /* Enable protocol handling, disable format probing for bs->file */
799 flags |= BDRV_O_PROTOCOL;
801 /* Our block drivers take care to send flushes and respect unmap policy,
802 * so we can enable both unconditionally on lower layers. */
803 flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;
805 /* Clear flags that only apply to the top layer */
806 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);
808 return flags;
812 * Returns the flags that bs->backing_hd should get, based on the given flags
813 * for the parent BDS
815 static int bdrv_backing_flags(int flags)
817 /* backing files always opened read-only */
818 flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);
820 /* snapshot=on is handled on the top layer */
821 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);
823 return flags;
826 static int bdrv_open_flags(BlockDriverState *bs, int flags)
828 int open_flags = flags | BDRV_O_CACHE_WB;
831 * Clear flags that are internal to the block layer before opening the
832 * image.
834 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
837 * Snapshots should be writable.
839 if (flags & BDRV_O_TEMPORARY) {
840 open_flags |= BDRV_O_RDWR;
843 return open_flags;
846 static void bdrv_assign_node_name(BlockDriverState *bs,
847 const char *node_name,
848 Error **errp)
850 if (!node_name) {
851 return;
854 /* empty string node name is invalid */
855 if (node_name[0] == '\0') {
856 error_setg(errp, "Empty node name");
857 return;
860 /* takes care of avoiding namespaces collisions */
861 if (bdrv_find(node_name)) {
862 error_setg(errp, "node-name=%s is conflicting with a device id",
863 node_name);
864 return;
867 /* takes care of avoiding duplicates node names */
868 if (bdrv_find_node(node_name)) {
869 error_setg(errp, "Duplicate node name");
870 return;
873 /* copy node name into the bs and insert it into the graph list */
874 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
875 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
879 * Common part for opening disk images and files
881 * Removes all processed options from *options.
883 static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
884 QDict *options, int flags, BlockDriver *drv, Error **errp)
886 int ret, open_flags;
887 const char *filename;
888 const char *node_name = NULL;
889 Error *local_err = NULL;
891 assert(drv != NULL);
892 assert(bs->file == NULL);
893 assert(options != NULL && bs->options != options);
895 if (file != NULL) {
896 filename = file->filename;
897 } else {
898 filename = qdict_get_try_str(options, "filename");
901 if (drv->bdrv_needs_filename && !filename) {
902 error_setg(errp, "The '%s' block driver requires a file name",
903 drv->format_name);
904 return -EINVAL;
907 trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);
909 node_name = qdict_get_try_str(options, "node-name");
910 bdrv_assign_node_name(bs, node_name, &local_err);
911 if (local_err) {
912 error_propagate(errp, local_err);
913 return -EINVAL;
915 qdict_del(options, "node-name");
917 /* bdrv_open() with directly using a protocol as drv. This layer is already
918 * opened, so assign it to bs (while file becomes a closed BlockDriverState)
919 * and return immediately. */
920 if (file != NULL && drv->bdrv_file_open) {
921 bdrv_swap(file, bs);
922 return 0;
925 bs->open_flags = flags;
926 bs->guest_block_size = 512;
927 bs->request_alignment = 512;
928 bs->zero_beyond_eof = true;
929 open_flags = bdrv_open_flags(bs, flags);
930 bs->read_only = !(open_flags & BDRV_O_RDWR);
932 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
933 error_setg(errp,
934 !bs->read_only && bdrv_is_whitelisted(drv, true)
935 ? "Driver '%s' can only be used for read-only devices"
936 : "Driver '%s' is not whitelisted",
937 drv->format_name);
938 return -ENOTSUP;
941 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
942 if (flags & BDRV_O_COPY_ON_READ) {
943 if (!bs->read_only) {
944 bdrv_enable_copy_on_read(bs);
945 } else {
946 error_setg(errp, "Can't use copy-on-read on read-only device");
947 return -EINVAL;
951 if (filename != NULL) {
952 pstrcpy(bs->filename, sizeof(bs->filename), filename);
953 } else {
954 bs->filename[0] = '\0';
957 bs->drv = drv;
958 bs->opaque = g_malloc0(drv->instance_size);
960 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
962 /* Open the image, either directly or using a protocol */
963 if (drv->bdrv_file_open) {
964 assert(file == NULL);
965 assert(!drv->bdrv_needs_filename || filename != NULL);
966 ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
967 } else {
968 if (file == NULL) {
969 error_setg(errp, "Can't use '%s' as a block driver for the "
970 "protocol level", drv->format_name);
971 ret = -EINVAL;
972 goto free_and_fail;
974 bs->file = file;
975 ret = drv->bdrv_open(bs, options, open_flags, &local_err);
978 if (ret < 0) {
979 if (local_err) {
980 error_propagate(errp, local_err);
981 } else if (bs->filename[0]) {
982 error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
983 } else {
984 error_setg_errno(errp, -ret, "Could not open image");
986 goto free_and_fail;
989 ret = refresh_total_sectors(bs, bs->total_sectors);
990 if (ret < 0) {
991 error_setg_errno(errp, -ret, "Could not refresh total sector count");
992 goto free_and_fail;
995 bdrv_refresh_limits(bs);
996 assert(bdrv_opt_mem_align(bs) != 0);
997 assert((bs->request_alignment != 0) || bs->sg);
998 return 0;
1000 free_and_fail:
1001 bs->file = NULL;
1002 g_free(bs->opaque);
1003 bs->opaque = NULL;
1004 bs->drv = NULL;
1005 return ret;
1009 * Opens a file using a protocol (file, host_device, nbd, ...)
1011 * options is an indirect pointer to a QDict of options to pass to the block
1012 * drivers, or pointer to NULL for an empty set of options. If this function
1013 * takes ownership of the QDict reference, it will set *options to NULL;
1014 * otherwise, it will contain unused/unrecognized options after this function
1015 * returns. Then, the caller is responsible for freeing it. If it intends to
1016 * reuse the QDict, QINCREF() should be called beforehand.
1018 static int bdrv_file_open(BlockDriverState *bs, const char *filename,
1019 QDict **options, int flags, Error **errp)
1021 BlockDriver *drv;
1022 const char *drvname;
1023 bool parse_filename = false;
1024 Error *local_err = NULL;
1025 int ret;
1027 /* Fetch the file name from the options QDict if necessary */
1028 if (!filename) {
1029 filename = qdict_get_try_str(*options, "filename");
1030 } else if (filename && !qdict_haskey(*options, "filename")) {
1031 qdict_put(*options, "filename", qstring_from_str(filename));
1032 parse_filename = true;
1033 } else {
1034 error_setg(errp, "Can't specify 'file' and 'filename' options at the "
1035 "same time");
1036 ret = -EINVAL;
1037 goto fail;
1040 /* Find the right block driver */
1041 drvname = qdict_get_try_str(*options, "driver");
1042 if (drvname) {
1043 drv = bdrv_find_format(drvname);
1044 if (!drv) {
1045 error_setg(errp, "Unknown driver '%s'", drvname);
1047 qdict_del(*options, "driver");
1048 } else if (filename) {
1049 drv = bdrv_find_protocol(filename, parse_filename);
1050 if (!drv) {
1051 error_setg(errp, "Unknown protocol");
1053 } else {
1054 error_setg(errp, "Must specify either driver or file");
1055 drv = NULL;
1058 if (!drv) {
1059 /* errp has been set already */
1060 ret = -ENOENT;
1061 goto fail;
1064 /* Parse the filename and open it */
1065 if (drv->bdrv_parse_filename && parse_filename) {
1066 drv->bdrv_parse_filename(filename, *options, &local_err);
1067 if (local_err) {
1068 error_propagate(errp, local_err);
1069 ret = -EINVAL;
1070 goto fail;
1073 if (!drv->bdrv_needs_filename) {
1074 qdict_del(*options, "filename");
1075 } else {
1076 filename = qdict_get_str(*options, "filename");
1080 if (!drv->bdrv_file_open) {
1081 ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
1082 *options = NULL;
1083 } else {
1084 ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
1086 if (ret < 0) {
1087 error_propagate(errp, local_err);
1088 goto fail;
1091 bs->growable = 1;
1092 return 0;
1094 fail:
1095 return ret;
1098 void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
1101 if (bs->backing_hd) {
1102 assert(bs->backing_blocker);
1103 bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
1104 } else if (backing_hd) {
1105 error_setg(&bs->backing_blocker,
1106 "device is used as backing hd of '%s'",
1107 bs->device_name);
1110 bs->backing_hd = backing_hd;
1111 if (!backing_hd) {
1112 error_free(bs->backing_blocker);
1113 bs->backing_blocker = NULL;
1114 goto out;
1116 bs->open_flags &= ~BDRV_O_NO_BACKING;
1117 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
1118 pstrcpy(bs->backing_format, sizeof(bs->backing_format),
1119 backing_hd->drv ? backing_hd->drv->format_name : "");
1121 bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
1122 /* Otherwise we won't be able to commit due to check in bdrv_commit */
1123 bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
1124 bs->backing_blocker);
1125 out:
1126 bdrv_refresh_limits(bs);
1130 * Opens the backing file for a BlockDriverState if not yet open
1132 * options is a QDict of options to pass to the block drivers, or NULL for an
1133 * empty set of options. The reference to the QDict is transferred to this
1134 * function (even on failure), so if the caller intends to reuse the dictionary,
1135 * it needs to use QINCREF() before calling bdrv_file_open.
1137 int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
1139 char *backing_filename = g_malloc0(PATH_MAX);
1140 int ret = 0;
1141 BlockDriver *back_drv = NULL;
1142 BlockDriverState *backing_hd;
1143 Error *local_err = NULL;
1145 if (bs->backing_hd != NULL) {
1146 QDECREF(options);
1147 goto free_exit;
1150 /* NULL means an empty set of options */
1151 if (options == NULL) {
1152 options = qdict_new();
1155 bs->open_flags &= ~BDRV_O_NO_BACKING;
1156 if (qdict_haskey(options, "file.filename")) {
1157 backing_filename[0] = '\0';
1158 } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
1159 QDECREF(options);
1160 goto free_exit;
1161 } else {
1162 bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
1165 backing_hd = bdrv_new("", errp);
1167 if (bs->backing_format[0] != '\0') {
1168 back_drv = bdrv_find_format(bs->backing_format);
1171 assert(bs->backing_hd == NULL);
1172 ret = bdrv_open(&backing_hd,
1173 *backing_filename ? backing_filename : NULL, NULL, options,
1174 bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
1175 if (ret < 0) {
1176 bdrv_unref(backing_hd);
1177 backing_hd = NULL;
1178 bs->open_flags |= BDRV_O_NO_BACKING;
1179 error_setg(errp, "Could not open backing file: %s",
1180 error_get_pretty(local_err));
1181 error_free(local_err);
1182 goto free_exit;
1184 bdrv_set_backing_hd(bs, backing_hd);
1186 free_exit:
1187 g_free(backing_filename);
1188 return ret;
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
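/*
 * Example (illustrative, with hypothetical option values): with bdref_key
 * "file", a flattened options QDict such as
 *
 *     { "file.driver": "file", "file.filename": "/tmp/test.img" }
 *
 * yields an image_options sub-dict of
 * { "driver": "file", "filename": "/tmp/test.img" }, whereas
 * { "file": "my-node" } is treated as a reference to an existing block
 * device named "my-node".
 */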
1243 void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
1245 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
1246 char *tmp_filename = g_malloc0(PATH_MAX + 1);
1247 int64_t total_size;
1248 BlockDriver *bdrv_qcow2;
1249 QemuOpts *opts = NULL;
1250 QDict *snapshot_options;
1251 BlockDriverState *bs_snapshot;
1252 Error *local_err;
1253 int ret;
1255 /* if snapshot, we create a temporary backing file and open it
1256 instead of opening 'filename' directly */
1258 /* Get the required size from the image */
1259 total_size = bdrv_getlength(bs);
1260 if (total_size < 0) {
1261 error_setg_errno(errp, -total_size, "Could not get image size");
1262 goto out;
1264 total_size &= BDRV_SECTOR_MASK;
1266 /* Create the temporary image */
1267 ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
1268 if (ret < 0) {
1269 error_setg_errno(errp, -ret, "Could not get temporary filename");
1270 goto out;
1273 bdrv_qcow2 = bdrv_find_format("qcow2");
1274 opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0,
1275 &error_abort);
1276 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
1277 ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err);
1278 qemu_opts_del(opts);
1279 if (ret < 0) {
1280 error_setg_errno(errp, -ret, "Could not create temporary overlay "
1281 "'%s': %s", tmp_filename,
1282 error_get_pretty(local_err));
1283 error_free(local_err);
1284 goto out;
1287 /* Prepare a new options QDict for the temporary file */
1288 snapshot_options = qdict_new();
1289 qdict_put(snapshot_options, "file.driver",
1290 qstring_from_str("file"));
1291 qdict_put(snapshot_options, "file.filename",
1292 qstring_from_str(tmp_filename));
1294 bs_snapshot = bdrv_new("", &error_abort);
1296 ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
1297 flags, bdrv_qcow2, &local_err);
1298 if (ret < 0) {
1299 error_propagate(errp, local_err);
1300 goto out;
1303 bdrv_append(bs_snapshot, bs);
1305 out:
1306 g_free(tmp_filename);
static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
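/*
 * Example (illustrative file name): a filename such as
 *
 *     json:{"driver": "qcow2", "file": {"driver": "file",
 *           "filename": "/tmp/test.qcow2"}}
 *
 * is parsed into a QDict and flattened to the keys "driver", "file.driver"
 * and "file.filename" before being merged with any explicitly given options
 * in bdrv_open() below.
 */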
1337 * Opens a disk image (raw, qcow2, vmdk, ...)
1339 * options is a QDict of options to pass to the block drivers, or NULL for an
1340 * empty set of options. The reference to the QDict belongs to the block layer
1341 * after the call (even on failure), so if the caller intends to reuse the
1342 * dictionary, it needs to use QINCREF() before calling bdrv_open.
1344 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1345 * If it is not NULL, the referenced BDS will be reused.
1347 * The reference parameter may be used to specify an existing block device which
1348 * should be opened. If specified, neither options nor a filename may be given,
1349 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
1351 int bdrv_open(BlockDriverState **pbs, const char *filename,
1352 const char *reference, QDict *options, int flags,
1353 BlockDriver *drv, Error **errp)
1355 int ret;
1356 BlockDriverState *file = NULL, *bs;
1357 const char *drvname;
1358 Error *local_err = NULL;
1359 int snapshot_flags = 0;
1361 assert(pbs);
1363 if (reference) {
1364 bool options_non_empty = options ? qdict_size(options) : false;
1365 QDECREF(options);
1367 if (*pbs) {
1368 error_setg(errp, "Cannot reuse an existing BDS when referencing "
1369 "another block device");
1370 return -EINVAL;
1373 if (filename || options_non_empty) {
1374 error_setg(errp, "Cannot reference an existing block device with "
1375 "additional options or a new filename");
1376 return -EINVAL;
1379 bs = bdrv_lookup_bs(reference, reference, errp);
1380 if (!bs) {
1381 return -ENODEV;
1383 bdrv_ref(bs);
1384 *pbs = bs;
1385 return 0;
1388 if (*pbs) {
1389 bs = *pbs;
1390 } else {
1391 bs = bdrv_new("", &error_abort);
1394 /* NULL means an empty set of options */
1395 if (options == NULL) {
1396 options = qdict_new();
1399 if (filename && g_str_has_prefix(filename, "json:")) {
1400 QDict *json_options = parse_json_filename(filename, &local_err);
1401 if (local_err) {
1402 ret = -EINVAL;
1403 goto fail;
1406 /* Options given in the filename have lower priority than options
1407 * specified directly */
1408 qdict_join(options, json_options, false);
1409 QDECREF(json_options);
1410 filename = NULL;
1413 bs->options = options;
1414 options = qdict_clone_shallow(options);
1416 if (flags & BDRV_O_PROTOCOL) {
1417 assert(!drv);
1418 ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL,
1419 &local_err);
1420 if (!ret) {
1421 drv = bs->drv;
1422 goto done;
1423 } else if (bs->drv) {
1424 goto close_and_fail;
1425 } else {
1426 goto fail;
1430 /* Open image file without format layer */
1431 if (flags & BDRV_O_RDWR) {
1432 flags |= BDRV_O_ALLOW_RDWR;
1434 if (flags & BDRV_O_SNAPSHOT) {
1435 snapshot_flags = bdrv_temp_snapshot_flags(flags);
1436 flags = bdrv_backing_flags(flags);
1439 assert(file == NULL);
1440 ret = bdrv_open_image(&file, filename, options, "file",
1441 bdrv_inherited_flags(flags),
1442 true, &local_err);
1443 if (ret < 0) {
1444 goto fail;
1447 /* Find the right image format driver */
1448 drvname = qdict_get_try_str(options, "driver");
1449 if (drvname) {
1450 drv = bdrv_find_format(drvname);
1451 qdict_del(options, "driver");
1452 if (!drv) {
1453 error_setg(errp, "Invalid driver: '%s'", drvname);
1454 ret = -EINVAL;
1455 goto fail;
1459 if (!drv) {
1460 if (file) {
1461 ret = find_image_format(file, filename, &drv, &local_err);
1462 } else {
1463 error_setg(errp, "Must specify either driver or file");
1464 ret = -EINVAL;
1465 goto fail;
1469 if (!drv) {
1470 goto fail;
1473 /* Open the image */
1474 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
1475 if (ret < 0) {
1476 goto fail;
1479 if (file && (bs->file != file)) {
1480 bdrv_unref(file);
1481 file = NULL;
1484 /* If there is a backing file, use it */
1485 if ((flags & BDRV_O_NO_BACKING) == 0) {
1486 QDict *backing_options;
1488 qdict_extract_subqdict(options, &backing_options, "backing.");
1489 ret = bdrv_open_backing_file(bs, backing_options, &local_err);
1490 if (ret < 0) {
1491 goto close_and_fail;
1495 /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
1496 * temporary snapshot afterwards. */
1497 if (snapshot_flags) {
1498 bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
1499 if (local_err) {
1500 error_propagate(errp, local_err);
1501 goto close_and_fail;
1506 done:
1507 /* Check if any unknown options were used */
1508 if (options && (qdict_size(options) != 0)) {
1509 const QDictEntry *entry = qdict_first(options);
1510 if (flags & BDRV_O_PROTOCOL) {
1511 error_setg(errp, "Block protocol '%s' doesn't support the option "
1512 "'%s'", drv->format_name, entry->key);
1513 } else {
1514 error_setg(errp, "Block format '%s' used by device '%s' doesn't "
1515 "support the option '%s'", drv->format_name,
1516 bs->device_name, entry->key);
1519 ret = -EINVAL;
1520 goto close_and_fail;
1523 if (!bdrv_key_required(bs)) {
1524 bdrv_dev_change_media_cb(bs, true);
1525 } else if (!runstate_check(RUN_STATE_PRELAUNCH)
1526 && !runstate_check(RUN_STATE_INMIGRATE)
1527 && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
1528 error_setg(errp,
1529 "Guest must be stopped for opening of encrypted image");
1530 ret = -EBUSY;
1531 goto close_and_fail;
1534 QDECREF(options);
1535 *pbs = bs;
1536 return 0;
1538 fail:
1539 if (file != NULL) {
1540 bdrv_unref(file);
1542 QDECREF(bs->options);
1543 QDECREF(options);
1544 bs->options = NULL;
1545 if (!*pbs) {
1546 /* If *pbs is NULL, a new BDS has been created in this function and
1547 needs to be freed now. Otherwise, it does not need to be closed,
1548 since it has not really been opened yet. */
1549 bdrv_unref(bs);
1551 if (local_err) {
1552 error_propagate(errp, local_err);
1554 return ret;
1556 close_and_fail:
1557 /* See fail path, but now the BDS has to be always closed */
1558 if (*pbs) {
1559 bdrv_close(bs);
1560 } else {
1561 bdrv_unref(bs);
1563 QDECREF(options);
1564 if (local_err) {
1565 error_propagate(errp, local_err);
1567 return ret;
1570 typedef struct BlockReopenQueueEntry {
1571 bool prepared;
1572 BDRVReopenState state;
1573 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
1574 } BlockReopenQueueEntry;
1577 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1578 * reopen of multiple devices.
1580 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
1581 * already performed, or alternatively may be NULL a new BlockReopenQueue will
1582 * be created and initialized. This newly created BlockReopenQueue should be
1583 * passed back in for subsequent calls that are intended to be of the same
1584 * atomic 'set'.
1586 * bs is the BlockDriverState to add to the reopen queue.
1588 * flags contains the open flags for the associated bs
1590 * returns a pointer to bs_queue, which is either the newly allocated
1591 * bs_queue, or the existing bs_queue being used.
1594 BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1595 BlockDriverState *bs, int flags)
1597 assert(bs != NULL);
1599 BlockReopenQueueEntry *bs_entry;
1600 if (bs_queue == NULL) {
1601 bs_queue = g_new0(BlockReopenQueue, 1);
1602 QSIMPLEQ_INIT(bs_queue);
1605 /* bdrv_open() masks this flag out */
1606 flags &= ~BDRV_O_PROTOCOL;
1608 if (bs->file) {
1609 bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
1612 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1613 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1615 bs_entry->state.bs = bs;
1616 bs_entry->state.flags = flags;
1618 return bs_queue;
1622 * Reopen multiple BlockDriverStates atomically & transactionally.
1624 * The queue passed in (bs_queue) must have been built up previous
1625 * via bdrv_reopen_queue().
1627 * Reopens all BDS specified in the queue, with the appropriate
1628 * flags. All devices are prepared for reopen, and failure of any
1629 * device will cause all device changes to be abandonded, and intermediate
1630 * data cleaned up.
1632 * If all devices prepare successfully, then the changes are committed
1633 * to all devices.
1636 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1638 int ret = -1;
1639 BlockReopenQueueEntry *bs_entry, *next;
1640 Error *local_err = NULL;
1642 assert(bs_queue != NULL);
1644 bdrv_drain_all();
1646 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1647 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1648 error_propagate(errp, local_err);
1649 goto cleanup;
1651 bs_entry->prepared = true;
1654 /* If we reach this point, we have success and just need to apply the
1655 * changes
1657 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1658 bdrv_reopen_commit(&bs_entry->state);
1661 ret = 0;
1663 cleanup:
1664 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1665 if (ret && bs_entry->prepared) {
1666 bdrv_reopen_abort(&bs_entry->state);
1668 g_free(bs_entry);
1670 g_free(bs_queue);
1671 return ret;
1675 /* Reopen a single BlockDriverState with the specified flags. */
1676 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1678 int ret = -1;
1679 Error *local_err = NULL;
1680 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1682 ret = bdrv_reopen_multiple(queue, &local_err);
1683 if (local_err != NULL) {
1684 error_propagate(errp, local_err);
1686 return ret;
1691 * Prepares a BlockDriverState for reopen. All changes are staged in the
1692 * 'opaque' field of the BDRVReopenState, which is used and allocated by
1693 * the block driver layer .bdrv_reopen_prepare()
1695 * bs is the BlockDriverState to reopen
1696 * flags are the new open flags
1697 * queue is the reopen queue
1699 * Returns 0 on success, non-zero on error. On error errp will be set
1700 * as well.
1702 * On failure, bdrv_reopen_abort() will be called to clean up any data.
1703 * It is the responsibility of the caller to then call the abort() or
1704 * commit() for any other BDS that have been left in a prepare() state
1707 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1708 Error **errp)
1710 int ret = -1;
1711 Error *local_err = NULL;
1712 BlockDriver *drv;
1714 assert(reopen_state != NULL);
1715 assert(reopen_state->bs->drv != NULL);
1716 drv = reopen_state->bs->drv;
1718 /* if we are to stay read-only, do not allow permission change
1719 * to r/w */
1720 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1721 reopen_state->flags & BDRV_O_RDWR) {
1722 error_set(errp, QERR_DEVICE_IS_READ_ONLY,
1723 reopen_state->bs->device_name);
1724 goto error;
1728 ret = bdrv_flush(reopen_state->bs);
1729 if (ret) {
1730 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1731 strerror(-ret));
1732 goto error;
1735 if (drv->bdrv_reopen_prepare) {
1736 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1737 if (ret) {
1738 if (local_err != NULL) {
1739 error_propagate(errp, local_err);
1740 } else {
1741 error_setg(errp, "failed while preparing to reopen image '%s'",
1742 reopen_state->bs->filename);
1744 goto error;
1746 } else {
1747 /* It is currently mandatory to have a bdrv_reopen_prepare()
1748 * handler for each supported drv. */
1749 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
1750 drv->format_name, reopen_state->bs->device_name,
1751 "reopening of file");
1752 ret = -1;
1753 goto error;
1756 ret = 0;
1758 error:
1759 return ret;
1763 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1764 * makes them final by swapping the staging BlockDriverState contents into
1765 * the active BlockDriverState contents.
1767 void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1769 BlockDriver *drv;
1771 assert(reopen_state != NULL);
1772 drv = reopen_state->bs->drv;
1773 assert(drv != NULL);
1775 /* If there are any driver level actions to take */
1776 if (drv->bdrv_reopen_commit) {
1777 drv->bdrv_reopen_commit(reopen_state);
1780 /* set BDS specific flags now */
1781 reopen_state->bs->open_flags = reopen_state->flags;
1782 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1783 BDRV_O_CACHE_WB);
1784 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1786 bdrv_refresh_limits(reopen_state->bs);
1790 * Abort the reopen, and delete and free the staged changes in
1791 * reopen_state
1793 void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1795 BlockDriver *drv;
1797 assert(reopen_state != NULL);
1798 drv = reopen_state->bs->drv;
1799 assert(drv != NULL);
1801 if (drv->bdrv_reopen_abort) {
1802 drv->bdrv_reopen_abort(reopen_state);
1807 void bdrv_close(BlockDriverState *bs)
1809 if (bs->job) {
1810 block_job_cancel_sync(bs->job);
1812 bdrv_drain_all(); /* complete I/O */
1813 bdrv_flush(bs);
1814 bdrv_drain_all(); /* in case flush left pending I/O */
1815 notifier_list_notify(&bs->close_notifiers, bs);
1817 if (bs->drv) {
1818 if (bs->backing_hd) {
1819 BlockDriverState *backing_hd = bs->backing_hd;
1820 bdrv_set_backing_hd(bs, NULL);
1821 bdrv_unref(backing_hd);
1823 bs->drv->bdrv_close(bs);
1824 g_free(bs->opaque);
1825 bs->opaque = NULL;
1826 bs->drv = NULL;
1827 bs->copy_on_read = 0;
1828 bs->backing_file[0] = '\0';
1829 bs->backing_format[0] = '\0';
1830 bs->total_sectors = 0;
1831 bs->encrypted = 0;
1832 bs->valid_key = 0;
1833 bs->sg = 0;
1834 bs->growable = 0;
1835 bs->zero_beyond_eof = false;
1836 QDECREF(bs->options);
1837 bs->options = NULL;
1839 if (bs->file != NULL) {
1840 bdrv_unref(bs->file);
1841 bs->file = NULL;
1845 bdrv_dev_change_media_cb(bs, false);
1847 /*throttling disk I/O limits*/
1848 if (bs->io_limits_enabled) {
1849 bdrv_io_limits_disable(bs);
1853 void bdrv_close_all(void)
1855 BlockDriverState *bs;
1857 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1858 AioContext *aio_context = bdrv_get_aio_context(bs);
1860 aio_context_acquire(aio_context);
1861 bdrv_close(bs);
1862 aio_context_release(aio_context);
1866 /* Check if any requests are in-flight (including throttled requests) */
1867 static bool bdrv_requests_pending(BlockDriverState *bs)
1869 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1870 return true;
1872 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1873 return true;
1875 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
1876 return true;
1878 if (bs->file && bdrv_requests_pending(bs->file)) {
1879 return true;
1881 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1882 return true;
1884 return false;
1888 * Wait for pending requests to complete across all BlockDriverStates
1890 * This function does not flush data to disk, use bdrv_flush_all() for that
1891 * after calling this function.
1893 * Note that completion of an asynchronous I/O operation can trigger any
1894 * number of other I/O operations on other devices---for example a coroutine
1895 * can be arbitrarily complex and a constant flow of I/O can come until the
1896 * coroutine is complete. Because of this, it is not possible to have a
1897 * function to drain a single device's I/O queue.
1899 void bdrv_drain_all(void)
1901 /* Always run first iteration so any pending completion BHs run */
1902 bool busy = true;
1903 BlockDriverState *bs;
1905 while (busy) {
1906 busy = false;
1908 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1909 AioContext *aio_context = bdrv_get_aio_context(bs);
1910 bool bs_busy;
1912 aio_context_acquire(aio_context);
1913 bdrv_start_throttled_reqs(bs);
1914 bs_busy = bdrv_requests_pending(bs);
1915 bs_busy |= aio_poll(aio_context, bs_busy);
1916 aio_context_release(aio_context);
1918 busy |= bs_busy;
1923 /* make a BlockDriverState anonymous by removing from bdrv_state and
1924 * graph_bdrv_state list.
1925 Also, NULL terminate the device_name to prevent double remove */
1926 void bdrv_make_anon(BlockDriverState *bs)
1928 if (bs->device_name[0] != '\0') {
1929 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
1931 bs->device_name[0] = '\0';
1932 if (bs->node_name[0] != '\0') {
1933 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
1935 bs->node_name[0] = '\0';
1938 static void bdrv_rebind(BlockDriverState *bs)
1940 if (bs->drv && bs->drv->bdrv_rebind) {
1941 bs->drv->bdrv_rebind(bs);
1945 static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
1946 BlockDriverState *bs_src)
1948 /* move some fields that need to stay attached to the device */
1950 /* dev info */
1951 bs_dest->dev_ops = bs_src->dev_ops;
1952 bs_dest->dev_opaque = bs_src->dev_opaque;
1953 bs_dest->dev = bs_src->dev;
1954 bs_dest->guest_block_size = bs_src->guest_block_size;
1955 bs_dest->copy_on_read = bs_src->copy_on_read;
1957 bs_dest->enable_write_cache = bs_src->enable_write_cache;
1959 /* i/o throttled req */
1960 memcpy(&bs_dest->throttle_state,
1961 &bs_src->throttle_state,
1962 sizeof(ThrottleState));
1963 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
1964 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
1965 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
1967 /* r/w error */
1968 bs_dest->on_read_error = bs_src->on_read_error;
1969 bs_dest->on_write_error = bs_src->on_write_error;
1971 /* i/o status */
1972 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
1973 bs_dest->iostatus = bs_src->iostatus;
1975 /* dirty bitmap */
1976 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
1978 /* reference count */
1979 bs_dest->refcnt = bs_src->refcnt;
1981 /* job */
1982 bs_dest->job = bs_src->job;
1984 /* keep the same entry in bdrv_states */
1985 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
1986 bs_src->device_name);
1987 bs_dest->device_list = bs_src->device_list;
1988 memcpy(bs_dest->op_blockers, bs_src->op_blockers,
1989 sizeof(bs_dest->op_blockers));
1993 * Swap bs contents for two image chains while they are live,
1994 * while keeping required fields on the BlockDriverState that is
1995 * actually attached to a device.
1997 * This will modify the BlockDriverState fields, and swap contents
1998 * between bs_new and bs_old. Both bs_new and bs_old are modified.
2000 * bs_new is required to be anonymous.
2002 * This function does not create any image files.
2004 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2006 BlockDriverState tmp;
2008 /* The code needs to swap the node_name but simply swapping node_list won't
2009 * work so first remove the nodes from the graph list, do the swap then
2010 * insert them back if needed.
2012 if (bs_new->node_name[0] != '\0') {
2013 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2015 if (bs_old->node_name[0] != '\0') {
2016 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2019 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
2020 assert(bs_new->device_name[0] == '\0');
2021 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
2022 assert(bs_new->job == NULL);
2023 assert(bs_new->dev == NULL);
2024 assert(bs_new->io_limits_enabled == false);
2025 assert(!throttle_have_timer(&bs_new->throttle_state));
2027 tmp = *bs_new;
2028 *bs_new = *bs_old;
2029 *bs_old = tmp;
2031 /* there are some fields that should not be swapped, move them back */
2032 bdrv_move_feature_fields(&tmp, bs_old);
2033 bdrv_move_feature_fields(bs_old, bs_new);
2034 bdrv_move_feature_fields(bs_new, &tmp);
2036 /* bs_new shouldn't be in bdrv_states even after the swap! */
2037 assert(bs_new->device_name[0] == '\0');
2039 /* Check a few fields that should remain attached to the device */
2040 assert(bs_new->dev == NULL);
2041 assert(bs_new->job == NULL);
2042 assert(bs_new->io_limits_enabled == false);
2043 assert(!throttle_have_timer(&bs_new->throttle_state));
2045 /* insert the nodes back into the graph node list if needed */
2046 if (bs_new->node_name[0] != '\0') {
2047 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2049 if (bs_old->node_name[0] != '\0') {
2050 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2053 bdrv_rebind(bs_new);
2054 bdrv_rebind(bs_old);
2058 * Add new bs contents at the top of an image chain while the chain is
2059 * live, while keeping required fields on the top layer.
2061 * This will modify the BlockDriverState fields, and swap contents
2062 * between bs_new and bs_top. Both bs_new and bs_top are modified.
2064 * bs_new is required to be anonymous.
2066 * This function does not create any image files.
2068 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2070 bdrv_swap(bs_new, bs_top);
2072 /* The contents of 'tmp' will become bs_top, as we are
2073 * swapping bs_new and bs_top contents. */
2074 bdrv_set_backing_hd(bs_top, bs_new);
2077 static void bdrv_delete(BlockDriverState *bs)
2079 assert(!bs->dev);
2080 assert(!bs->job);
2081 assert(bdrv_op_blocker_is_empty(bs));
2082 assert(!bs->refcnt);
2083 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2085 bdrv_close(bs);
2087 /* remove from list, if necessary */
2088 bdrv_make_anon(bs);
2090 g_free(bs);
2093 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
2094 /* TODO change to DeviceState *dev when all users are qdevified */
2096 if (bs->dev) {
2097 return -EBUSY;
2099 bs->dev = dev;
2100 bdrv_iostatus_reset(bs);
2101 return 0;
2104 /* TODO qdevified devices don't use this, remove when devices are qdevified */
2105 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
2107 if (bdrv_attach_dev(bs, dev) < 0) {
2108 abort();
2112 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
2113 /* TODO change to DeviceState *dev when all users are qdevified */
2115 assert(bs->dev == dev);
2116 bs->dev = NULL;
2117 bs->dev_ops = NULL;
2118 bs->dev_opaque = NULL;
2119 bs->guest_block_size = 512;
2122 /* TODO change to return DeviceState * when all users are qdevified */
2123 void *bdrv_get_attached_dev(BlockDriverState *bs)
2125 return bs->dev;
2128 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2129 void *opaque)
2131 bs->dev_ops = ops;
2132 bs->dev_opaque = opaque;
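/*
 * A minimal sketch of how a device model might register these callbacks.
 * Only change_media_cb and resize_cb are shown because their signatures can
 * be inferred from the calls further below; ExampleDev and the handlers are
 * hypothetical.
 */
typedef struct ExampleDev {
    BlockDriverState *bs;
} ExampleDev;

static void example_change_media_cb(void *opaque, bool load)
{
    /* react to medium insertion (load == true) or removal (load == false) */
}

static void example_resize_cb(void *opaque)
{
    /* re-read the device size, e.g. with bdrv_getlength() */
}

static const BlockDevOps example_block_ops = {
    .change_media_cb = example_change_media_cb,
    .resize_cb       = example_resize_cb,
};

static void example_attach(ExampleDev *dev, BlockDriverState *bs)
{
    bdrv_attach_dev_nofail(bs, dev);
    bdrv_set_dev_ops(bs, &example_block_ops, dev);
    dev->bs = bs;
}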
2135 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
2136 enum MonitorEvent ev,
2137 BlockErrorAction action, bool is_read)
2139 QObject *data;
2140 const char *action_str;
2142 switch (action) {
2143 case BDRV_ACTION_REPORT:
2144 action_str = "report";
2145 break;
2146 case BDRV_ACTION_IGNORE:
2147 action_str = "ignore";
2148 break;
2149 case BDRV_ACTION_STOP:
2150 action_str = "stop";
2151 break;
2152 default:
2153 abort();
2156 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
2157 bdrv->device_name,
2158 action_str,
2159 is_read ? "read" : "write");
2160 monitor_protocol_event(ev, data);
2162 qobject_decref(data);
2165 static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
2167 QObject *data;
2169 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
2170 bdrv_get_device_name(bs), ejected);
2171 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
2173 qobject_decref(data);
2176 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2178 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2179 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2180 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2181 if (tray_was_closed) {
2182 /* tray open */
2183 bdrv_emit_qmp_eject_event(bs, true);
2185 if (load) {
2186 /* tray close */
2187 bdrv_emit_qmp_eject_event(bs, false);
2192 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2194 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2197 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2199 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2200 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2204 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2206 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2207 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2209 return false;
2212 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2214 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2215 bs->dev_ops->resize_cb(bs->dev_opaque);
2219 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2221 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2222 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2224 return false;
2228 * Run consistency checks on an image
2230 * Returns 0 if the check could be completed (it doesn't mean that the image is
2231 * free of errors) or -errno when an internal error occurred. The results of the
2232 * check are stored in res.
2234 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2236 if (bs->drv->bdrv_check == NULL) {
2237 return -ENOTSUP;
2240 memset(res, 0, sizeof(*res));
2241 return bs->drv->bdrv_check(bs, res, fix);
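/*
 * Usage sketch, illustrative only: run a read-only consistency check and
 * report the summary counters.  The BdrvCheckResult field names used here
 * (corruptions, leaks) follow the qemu-img check code and are an assumption
 * in this context; example_check_image is hypothetical.
 */
static int example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult result;
    int ret = bdrv_check(bs, &result, 0); /* 0: check only, do not repair */

    if (ret < 0) {
        return ret; /* -ENOTSUP or an internal error */
    }
    fprintf(stderr, "corruptions: %d leaks: %d\n",
            result.corruptions, result.leaks);
    return 0;
}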
2244 #define COMMIT_BUF_SECTORS 2048
2246 /* Commit the COW image contents into its backing file */
2247 int bdrv_commit(BlockDriverState *bs)
2249 BlockDriver *drv = bs->drv;
2250 int64_t sector, total_sectors, length, backing_length;
2251 int n, ro, open_flags;
2252 int ret = 0;
2253 uint8_t *buf = NULL;
2254 char filename[PATH_MAX];
2256 if (!drv)
2257 return -ENOMEDIUM;
2259 if (!bs->backing_hd) {
2260 return -ENOTSUP;
2263 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2264 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2265 return -EBUSY;
2268 ro = bs->backing_hd->read_only;
2269 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2270 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2271 open_flags = bs->backing_hd->open_flags;
2273 if (ro) {
2274 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2275 return -EACCES;
2279 length = bdrv_getlength(bs);
2280 if (length < 0) {
2281 ret = length;
2282 goto ro_cleanup;
2285 backing_length = bdrv_getlength(bs->backing_hd);
2286 if (backing_length < 0) {
2287 ret = backing_length;
2288 goto ro_cleanup;
2291 /* If our top snapshot is larger than the backing file image,
2292 * grow the backing file image if possible. If not possible,
2293 * we must return an error */
2294 if (length > backing_length) {
2295 ret = bdrv_truncate(bs->backing_hd, length);
2296 if (ret < 0) {
2297 goto ro_cleanup;
2301 total_sectors = length >> BDRV_SECTOR_BITS;
2302 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2304 for (sector = 0; sector < total_sectors; sector += n) {
2305 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2306 if (ret < 0) {
2307 goto ro_cleanup;
2309 if (ret) {
2310 ret = bdrv_read(bs, sector, buf, n);
2311 if (ret < 0) {
2312 goto ro_cleanup;
2315 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2316 if (ret < 0) {
2317 goto ro_cleanup;
2322 if (drv->bdrv_make_empty) {
2323 ret = drv->bdrv_make_empty(bs);
2324 if (ret < 0) {
2325 goto ro_cleanup;
2327 bdrv_flush(bs);
2331 * Make sure all data we wrote to the backing device is actually
2332 * stable on disk.
2334 if (bs->backing_hd) {
2335 bdrv_flush(bs->backing_hd);
2338 ret = 0;
2339 ro_cleanup:
2340 g_free(buf);
2342 if (ro) {
2343 /* ignoring error return here */
2344 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2347 return ret;
2350 int bdrv_commit_all(void)
2352 BlockDriverState *bs;
2354 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2355 AioContext *aio_context = bdrv_get_aio_context(bs);
2357 aio_context_acquire(aio_context);
2358 if (bs->drv && bs->backing_hd) {
2359 int ret = bdrv_commit(bs);
2360 if (ret < 0) {
2361 aio_context_release(aio_context);
2362 return ret;
2365 aio_context_release(aio_context);
2367 return 0;
2371 * Remove an active request from the tracked requests list
2373 * This function should be called when a tracked request is completing.
2375 static void tracked_request_end(BdrvTrackedRequest *req)
2377 if (req->serialising) {
2378 req->bs->serialising_in_flight--;
2381 QLIST_REMOVE(req, list);
2382 qemu_co_queue_restart_all(&req->wait_queue);
2386 * Add an active request to the tracked requests list
2388 static void tracked_request_begin(BdrvTrackedRequest *req,
2389 BlockDriverState *bs,
2390 int64_t offset,
2391 unsigned int bytes, bool is_write)
2393 *req = (BdrvTrackedRequest){
2394 .bs = bs,
2395 .offset = offset,
2396 .bytes = bytes,
2397 .is_write = is_write,
2398 .co = qemu_coroutine_self(),
2399 .serialising = false,
2400 .overlap_offset = offset,
2401 .overlap_bytes = bytes,
2404 qemu_co_queue_init(&req->wait_queue);
2406 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2409 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2411 int64_t overlap_offset = req->offset & ~(align - 1);
2412 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2413 - overlap_offset;
2415 if (!req->serialising) {
2416 req->bs->serialising_in_flight++;
2417 req->serialising = true;
2420 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2421 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2425 * Round a region to cluster boundaries
2427 void bdrv_round_to_clusters(BlockDriverState *bs,
2428 int64_t sector_num, int nb_sectors,
2429 int64_t *cluster_sector_num,
2430 int *cluster_nb_sectors)
2432 BlockDriverInfo bdi;
2434 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2435 *cluster_sector_num = sector_num;
2436 *cluster_nb_sectors = nb_sectors;
2437 } else {
2438 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2439 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2440 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2441 nb_sectors, c);
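/*
 * Worked example, assuming a 64 KiB cluster size (128 sectors of 512 bytes):
 * rounding sector_num = 130, nb_sectors = 10 yields cluster_sector_num = 128
 * and cluster_nb_sectors = 128, i.e. the request is widened to cover the
 * whole cluster it touches.  example_round is hypothetical.
 */
static void example_round(BlockDriverState *bs)
{
    int64_t cluster_sector_num;
    int cluster_nb_sectors;

    bdrv_round_to_clusters(bs, 130, 10,
                           &cluster_sector_num, &cluster_nb_sectors);
}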
2445 static int bdrv_get_cluster_size(BlockDriverState *bs)
2447 BlockDriverInfo bdi;
2448 int ret;
2450 ret = bdrv_get_info(bs, &bdi);
2451 if (ret < 0 || bdi.cluster_size == 0) {
2452 return bs->request_alignment;
2453 } else {
2454 return bdi.cluster_size;
2458 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2459 int64_t offset, unsigned int bytes)
2461 /* aaaa bbbb */
2462 if (offset >= req->overlap_offset + req->overlap_bytes) {
2463 return false;
2465 /* bbbb aaaa */
2466 if (req->overlap_offset >= offset + bytes) {
2467 return false;
2469 return true;
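/*
 * Worked example: a tracked request with overlap_offset == 4096 and
 * overlap_bytes == 4096 covers bytes [4096, 8192).  A candidate request at
 * offset == 8192, bytes == 512 does not overlap (the first test above fires),
 * while one at offset == 8000, bytes == 512 does.
 */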
2472 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2474 BlockDriverState *bs = self->bs;
2475 BdrvTrackedRequest *req;
2476 bool retry;
2477 bool waited = false;
2479 if (!bs->serialising_in_flight) {
2480 return false;
2483 do {
2484 retry = false;
2485 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2486 if (req == self || (!req->serialising && !self->serialising)) {
2487 continue;
2489 if (tracked_request_overlaps(req, self->overlap_offset,
2490 self->overlap_bytes))
2492 /* Hitting this means there was a reentrant request, for
2493 * example, a block driver issuing nested requests. This must
2494 * never happen since it means deadlock.
2496 assert(qemu_coroutine_self() != req->co);
2498 /* If the request is already (indirectly) waiting for us, or
2499 * will wait for us as soon as it wakes up, then just go on
2500 * (instead of producing a deadlock in the former case). */
2501 if (!req->waiting_for) {
2502 self->waiting_for = req;
2503 qemu_co_queue_wait(&req->wait_queue);
2504 self->waiting_for = NULL;
2505 retry = true;
2506 waited = true;
2507 break;
2511 } while (retry);
2513 return waited;
2517 * Return values:
2518 * 0 - success
2519 * -EINVAL - backing format specified, but no file
2520 * -ENOSPC - can't update the backing file because no space is left in the
2521 * image file header
2522 * -ENOTSUP - format driver doesn't support changing the backing file
2524 int bdrv_change_backing_file(BlockDriverState *bs,
2525 const char *backing_file, const char *backing_fmt)
2527 BlockDriver *drv = bs->drv;
2528 int ret;
2530 /* Backing file format doesn't make sense without a backing file */
2531 if (backing_fmt && !backing_file) {
2532 return -EINVAL;
2535 if (drv->bdrv_change_backing_file != NULL) {
2536 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2537 } else {
2538 ret = -ENOTSUP;
2541 if (ret == 0) {
2542 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2543 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2545 return ret;
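/*
 * Usage sketch, illustrative only: rewrite the backing file reference in the
 * image header after a rebase-like operation.  The file name
 * "new-base.qcow2" is hypothetical.
 */
static int example_update_backing(BlockDriverState *bs)
{
    int ret = bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");

    if (ret == -ENOTSUP) {
        /* the format driver cannot rewrite its backing file reference */
    } else if (ret == -ENOSPC) {
        /* no room left in the image header for the new file name */
    }
    return ret;
}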
2549 * Finds the image layer in the chain that has 'bs' as its backing file.
2551 * active is the current topmost image.
2553 * Returns NULL if bs is not found in active's image chain,
2554 * or if active == bs.
2556 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2557 BlockDriverState *bs)
2559 BlockDriverState *overlay = NULL;
2560 BlockDriverState *intermediate;
2562 assert(active != NULL);
2563 assert(bs != NULL);
2565 /* if bs is the same as active, then by definition it has no overlay
2567 if (active == bs) {
2568 return NULL;
2571 intermediate = active;
2572 while (intermediate->backing_hd) {
2573 if (intermediate->backing_hd == bs) {
2574 overlay = intermediate;
2575 break;
2577 intermediate = intermediate->backing_hd;
2580 return overlay;
2583 typedef struct BlkIntermediateStates {
2584 BlockDriverState *bs;
2585 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2586 } BlkIntermediateStates;
2590 * Drops images above 'base' up to and including 'top', and sets the image
2591 * above 'top' to have base as its backing file.
2593 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2594 * information in 'bs' can be properly updated.
2596 * E.g., this will convert the following chain:
2597 * bottom <- base <- intermediate <- top <- active
2599 * to
2601 * bottom <- base <- active
2603 * It is allowed for bottom==base, in which case it converts:
2605 * base <- intermediate <- top <- active
2607 * to
2609 * base <- active
2611 * Error conditions:
2612 * if active == top, that is considered an error
2615 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2616 BlockDriverState *base)
2618 BlockDriverState *intermediate;
2619 BlockDriverState *base_bs = NULL;
2620 BlockDriverState *new_top_bs = NULL;
2621 BlkIntermediateStates *intermediate_state, *next;
2622 int ret = -EIO;
2624 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2625 QSIMPLEQ_INIT(&states_to_delete);
2627 if (!top->drv || !base->drv) {
2628 goto exit;
2631 new_top_bs = bdrv_find_overlay(active, top);
2633 if (new_top_bs == NULL) {
2634 /* we could not find the image above 'top', this is an error */
2635 goto exit;
2638 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2639 * to do, no intermediate images */
2640 if (new_top_bs->backing_hd == base) {
2641 ret = 0;
2642 goto exit;
2645 intermediate = top;
2647 /* now we will go down through the list, and add each BDS we find
2648 * into our deletion queue, until we hit the 'base'
2650 while (intermediate) {
2651 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2652 intermediate_state->bs = intermediate;
2653 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2655 if (intermediate->backing_hd == base) {
2656 base_bs = intermediate->backing_hd;
2657 break;
2659 intermediate = intermediate->backing_hd;
2661 if (base_bs == NULL) {
2662 /* something went wrong, we did not end at the base. Safely
2663 * unravel everything and exit with an error */
2664 goto exit;
2667 /* success - we can delete the intermediate states, and link top->base */
2668 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2669 base_bs->drv ? base_bs->drv->format_name : "");
2670 if (ret) {
2671 goto exit;
2673 bdrv_set_backing_hd(new_top_bs, base_bs);
2675 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2676 /* so that bdrv_close() does not recursively close the chain */
2677 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2678 bdrv_unref(intermediate_state->bs);
2680 ret = 0;
2682 exit:
2683 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2684 g_free(intermediate_state);
2686 return ret;
2690 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2691 size_t size)
2693 int64_t len;
2695 if (size > INT_MAX) {
2696 return -EIO;
2699 if (!bdrv_is_inserted(bs))
2700 return -ENOMEDIUM;
2702 if (bs->growable)
2703 return 0;
2705 len = bdrv_getlength(bs);
2707 if (offset < 0)
2708 return -EIO;
2710 if ((offset > len) || (len - offset < size))
2711 return -EIO;
2713 return 0;
2716 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2717 int nb_sectors)
2719 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2720 return -EIO;
2723 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2724 nb_sectors * BDRV_SECTOR_SIZE);
2727 typedef struct RwCo {
2728 BlockDriverState *bs;
2729 int64_t offset;
2730 QEMUIOVector *qiov;
2731 bool is_write;
2732 int ret;
2733 BdrvRequestFlags flags;
2734 } RwCo;
2736 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2738 RwCo *rwco = opaque;
2740 if (!rwco->is_write) {
2741 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2742 rwco->qiov->size, rwco->qiov,
2743 rwco->flags);
2744 } else {
2745 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2746 rwco->qiov->size, rwco->qiov,
2747 rwco->flags);
2752 * Process a vectored synchronous request using coroutines
2754 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2755 QEMUIOVector *qiov, bool is_write,
2756 BdrvRequestFlags flags)
2758 Coroutine *co;
2759 RwCo rwco = {
2760 .bs = bs,
2761 .offset = offset,
2762 .qiov = qiov,
2763 .is_write = is_write,
2764 .ret = NOT_DONE,
2765 .flags = flags,
2769 * In sync call context the vcpu is blocked, so this throttling timer
2770 * will not fire; therefore I/O throttling has to be disabled here
2771 * if it has been enabled.
2773 if (bs->io_limits_enabled) {
2774 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2775 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2776 bdrv_io_limits_disable(bs);
2779 if (qemu_in_coroutine()) {
2780 /* Fast-path if already in coroutine context */
2781 bdrv_rw_co_entry(&rwco);
2782 } else {
2783 AioContext *aio_context = bdrv_get_aio_context(bs);
2785 co = qemu_coroutine_create(bdrv_rw_co_entry);
2786 qemu_coroutine_enter(co, &rwco);
2787 while (rwco.ret == NOT_DONE) {
2788 aio_poll(aio_context, true);
2791 return rwco.ret;
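/*
 * A self-contained sketch of the same "synchronous wrapper around a
 * coroutine" pattern used by bdrv_prwv_co() above and by
 * bdrv_get_block_status() further down.  Everything named example_* is
 * hypothetical; bdrv_co_flush() merely stands in for any coroutine_fn
 * operation.
 */
typedef struct ExampleCo {
    BlockDriverState *bs;
    int ret;
    bool done;
} ExampleCo;

static void coroutine_fn example_co_entry(void *opaque)
{
    ExampleCo *e = opaque;

    e->ret = bdrv_co_flush(e->bs);
    e->done = true;
}

static int example_sync_wrapper(BlockDriverState *bs)
{
    ExampleCo e = { .bs = bs, .done = false };

    if (qemu_in_coroutine()) {
        /* Fast path: already in coroutine context, call directly */
        example_co_entry(&e);
    } else {
        Coroutine *co = qemu_coroutine_create(example_co_entry);
        qemu_coroutine_enter(co, &e);
        while (!e.done) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return e.ret;
}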
2795 * Process a synchronous request using coroutines
2797 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2798 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2800 QEMUIOVector qiov;
2801 struct iovec iov = {
2802 .iov_base = (void *)buf,
2803 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2806 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2807 return -EINVAL;
2810 qemu_iovec_init_external(&qiov, &iov, 1);
2811 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2812 &qiov, is_write, flags);
2815 /* return < 0 if error. See bdrv_write() for the return codes */
2816 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2817 uint8_t *buf, int nb_sectors)
2819 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2822 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2823 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2824 uint8_t *buf, int nb_sectors)
2826 bool enabled;
2827 int ret;
2829 enabled = bs->io_limits_enabled;
2830 bs->io_limits_enabled = false;
2831 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2832 bs->io_limits_enabled = enabled;
2833 return ret;
2836 /* Return < 0 if error. Important errors are:
2837 -EIO generic I/O error (may happen for all errors)
2838 -ENOMEDIUM No media inserted.
2839 -EINVAL Invalid sector number or nb_sectors
2840 -EACCES Trying to write to a read-only device
2842 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2843 const uint8_t *buf, int nb_sectors)
2845 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2848 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2849 int nb_sectors, BdrvRequestFlags flags)
2851 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2852 BDRV_REQ_ZERO_WRITE | flags);
2856 * Completely zero out a block device with the help of bdrv_write_zeroes.
2857 * The operation is sped up by checking the block status and only writing
2858 * zeroes to the device if they currently do not return zeroes. Optional
2859 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2861 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2863 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2865 int64_t target_size;
2866 int64_t ret, nb_sectors, sector_num = 0;
2867 int n;
2869 target_size = bdrv_getlength(bs);
2870 if (target_size < 0) {
2871 return target_size;
2873 target_size /= BDRV_SECTOR_SIZE;
2875 for (;;) {
2876 nb_sectors = target_size - sector_num;
2877 if (nb_sectors <= 0) {
2878 return 0;
2880 if (nb_sectors > INT_MAX) {
2881 nb_sectors = INT_MAX;
2883 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2884 if (ret < 0) {
2885 error_report("error getting block status at sector %" PRId64 ": %s",
2886 sector_num, strerror(-ret));
2887 return ret;
2889 if (ret & BDRV_BLOCK_ZERO) {
2890 sector_num += n;
2891 continue;
2893 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2894 if (ret < 0) {
2895 error_report("error writing zeroes at sector %" PRId64 ": %s",
2896 sector_num, strerror(-ret));
2897 return ret;
2899 sector_num += n;
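/*
 * Usage sketch, illustrative only: zero a whole image, allowing the driver to
 * unmap/discard ranges instead of writing explicit zeroes where it can.
 */
static int example_zero_whole_image(BlockDriverState *bs)
{
    return bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
}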
2903 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2905 QEMUIOVector qiov;
2906 struct iovec iov = {
2907 .iov_base = (void *)buf,
2908 .iov_len = bytes,
2910 int ret;
2912 if (bytes < 0) {
2913 return -EINVAL;
2916 qemu_iovec_init_external(&qiov, &iov, 1);
2917 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2918 if (ret < 0) {
2919 return ret;
2922 return bytes;
2925 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2927 int ret;
2929 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2930 if (ret < 0) {
2931 return ret;
2934 return qiov->size;
2937 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2938 const void *buf, int bytes)
2940 QEMUIOVector qiov;
2941 struct iovec iov = {
2942 .iov_base = (void *) buf,
2943 .iov_len = bytes,
2946 if (bytes < 0) {
2947 return -EINVAL;
2950 qemu_iovec_init_external(&qiov, &iov, 1);
2951 return bdrv_pwritev(bs, offset, &qiov);
2955 * Writes to the file and ensures that no writes are reordered across this
2956 * request (acts as a barrier)
2958 * Returns 0 on success, -errno in error cases.
2960 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2961 const void *buf, int count)
2963 int ret;
2965 ret = bdrv_pwrite(bs, offset, buf, count);
2966 if (ret < 0) {
2967 return ret;
2970 /* No flush needed for cache modes that already do it */
2971 if (bs->enable_write_cache) {
2972 bdrv_flush(bs);
2975 return 0;
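/*
 * Usage sketch, illustrative only: update a small piece of image metadata and
 * make sure it is stable on disk before any later write can be reordered
 * around it.  The 64-byte header at offset 0 is a hypothetical layout.
 */
static int example_update_header(BlockDriverState *bs, const void *header)
{
    return bdrv_pwrite_sync(bs, 0, header, 64);
}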
2978 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2979 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2981 /* Perform I/O through a temporary buffer so that users who scribble over
2982 * their read buffer while the operation is in progress do not end up
2983 * modifying the image file. This is critical for zero-copy guest I/O
2984 * where anything might happen inside guest memory.
2986 void *bounce_buffer;
2988 BlockDriver *drv = bs->drv;
2989 struct iovec iov;
2990 QEMUIOVector bounce_qiov;
2991 int64_t cluster_sector_num;
2992 int cluster_nb_sectors;
2993 size_t skip_bytes;
2994 int ret;
2996 /* Cover entire cluster so no additional backing file I/O is required when
2997 * allocating the cluster in the image file.
2999 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
3000 &cluster_sector_num, &cluster_nb_sectors);
3002 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
3003 cluster_sector_num, cluster_nb_sectors);
3005 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
3006 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
3007 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
3009 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
3010 &bounce_qiov);
3011 if (ret < 0) {
3012 goto err;
3015 if (drv->bdrv_co_write_zeroes &&
3016 buffer_is_zero(bounce_buffer, iov.iov_len)) {
3017 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
3018 cluster_nb_sectors, 0);
3019 } else {
3020 /* This does not change the data on the disk, so it is not necessary
3021 * to flush even in cache=writethrough mode.
3023 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
3024 &bounce_qiov);
3027 if (ret < 0) {
3028 /* It might be okay to ignore write errors for guest requests. If this
3029 * is a deliberate copy-on-read then we don't want to ignore the error.
3030 * Simply report it in all cases.
3032 goto err;
3035 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
3036 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3037 nb_sectors * BDRV_SECTOR_SIZE);
3039 err:
3040 qemu_vfree(bounce_buffer);
3041 return ret;
3045 * Forwards an already correctly aligned request to the BlockDriver. This
3046 * handles copy on read and zeroing after EOF; any other features must be
3047 * implemented by the caller.
3049 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3050 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3051 int64_t align, QEMUIOVector *qiov, int flags)
3053 BlockDriver *drv = bs->drv;
3054 int ret;
3056 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3057 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3059 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3060 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3062 /* Handle Copy on Read and associated serialisation */
3063 if (flags & BDRV_REQ_COPY_ON_READ) {
3064 /* If we touch the same cluster it counts as an overlap. This
3065 * guarantees that allocating writes will be serialized and not race
3066 * with each other for the same cluster. For example, in copy-on-read
3067 * it ensures that the CoR read and write operations are atomic and
3068 * guest writes cannot interleave between them. */
3069 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3072 wait_serialising_requests(req);
3074 if (flags & BDRV_REQ_COPY_ON_READ) {
3075 int pnum;
3077 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3078 if (ret < 0) {
3079 goto out;
3082 if (!ret || pnum != nb_sectors) {
3083 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3084 goto out;
3088 /* Forward the request to the BlockDriver */
3089 if (!(bs->zero_beyond_eof && bs->growable)) {
3090 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3091 } else {
3092 /* Read zeros after EOF of growable BDSes */
3093 int64_t len, total_sectors, max_nb_sectors;
3095 len = bdrv_getlength(bs);
3096 if (len < 0) {
3097 ret = len;
3098 goto out;
3101 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
3102 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3103 align >> BDRV_SECTOR_BITS);
3104 if (max_nb_sectors > 0) {
3105 ret = drv->bdrv_co_readv(bs, sector_num,
3106 MIN(nb_sectors, max_nb_sectors), qiov);
3107 } else {
3108 ret = 0;
3111 /* Reading beyond end of file is supposed to produce zeroes */
3112 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3113 uint64_t offset = MAX(0, total_sectors - sector_num);
3114 uint64_t bytes = (sector_num + nb_sectors - offset) *
3115 BDRV_SECTOR_SIZE;
3116 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3120 out:
3121 return ret;
3125 * Handle a read request in coroutine context
3127 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3128 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3129 BdrvRequestFlags flags)
3131 BlockDriver *drv = bs->drv;
3132 BdrvTrackedRequest req;
3134 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3135 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3136 uint8_t *head_buf = NULL;
3137 uint8_t *tail_buf = NULL;
3138 QEMUIOVector local_qiov;
3139 bool use_local_qiov = false;
3140 int ret;
3142 if (!drv) {
3143 return -ENOMEDIUM;
3145 if (bdrv_check_byte_request(bs, offset, bytes)) {
3146 return -EIO;
3149 if (bs->copy_on_read) {
3150 flags |= BDRV_REQ_COPY_ON_READ;
3153 /* throttling disk I/O */
3154 if (bs->io_limits_enabled) {
3155 bdrv_io_limits_intercept(bs, bytes, false);
3158 /* Align read if necessary by padding qiov */
3159 if (offset & (align - 1)) {
3160 head_buf = qemu_blockalign(bs, align);
3161 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3162 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3163 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3164 use_local_qiov = true;
3166 bytes += offset & (align - 1);
3167 offset = offset & ~(align - 1);
3170 if ((offset + bytes) & (align - 1)) {
3171 if (!use_local_qiov) {
3172 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3173 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3174 use_local_qiov = true;
3176 tail_buf = qemu_blockalign(bs, align);
3177 qemu_iovec_add(&local_qiov, tail_buf,
3178 align - ((offset + bytes) & (align - 1)));
3180 bytes = ROUND_UP(bytes, align);
3183 tracked_request_begin(&req, bs, offset, bytes, false);
3184 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3185 use_local_qiov ? &local_qiov : qiov,
3186 flags);
3187 tracked_request_end(&req);
3189 if (use_local_qiov) {
3190 qemu_iovec_destroy(&local_qiov);
3191 qemu_vfree(head_buf);
3192 qemu_vfree(tail_buf);
3195 return ret;
3198 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3199 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3200 BdrvRequestFlags flags)
3202 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3203 return -EINVAL;
3206 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3207 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3210 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3211 int nb_sectors, QEMUIOVector *qiov)
3213 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3215 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3218 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3219 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3221 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3223 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3224 BDRV_REQ_COPY_ON_READ);
3227 /* if no limit is specified in the BlockLimits use a default
3228 * of 32768 512-byte sectors (16 MiB) per request.
3230 #define MAX_WRITE_ZEROES_DEFAULT 32768
3232 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3233 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3235 BlockDriver *drv = bs->drv;
3236 QEMUIOVector qiov;
3237 struct iovec iov = {0};
3238 int ret = 0;
3240 int max_write_zeroes = bs->bl.max_write_zeroes ?
3241 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3243 while (nb_sectors > 0 && !ret) {
3244 int num = nb_sectors;
3246 /* Align request. Block drivers can expect the "bulk" of the request
3247 * to be aligned.
3249 if (bs->bl.write_zeroes_alignment
3250 && num > bs->bl.write_zeroes_alignment) {
3251 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3252 /* Make a small request up to the first aligned sector. */
3253 num = bs->bl.write_zeroes_alignment;
3254 num -= sector_num % bs->bl.write_zeroes_alignment;
3255 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3256 /* Shorten the request to the last aligned sector. num cannot
3257 * underflow because num > bs->bl.write_zeroes_alignment.
3259 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3263 /* limit request size */
3264 if (num > max_write_zeroes) {
3265 num = max_write_zeroes;
3268 ret = -ENOTSUP;
3269 /* First try the efficient write zeroes operation */
3270 if (drv->bdrv_co_write_zeroes) {
3271 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3274 if (ret == -ENOTSUP) {
3275 /* Fall back to bounce buffer if write zeroes is unsupported */
3276 iov.iov_len = num * BDRV_SECTOR_SIZE;
3277 if (iov.iov_base == NULL) {
3278 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3279 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3281 qemu_iovec_init_external(&qiov, &iov, 1);
3283 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3285 /* Keep the bounce buffer around if it is big enough for all
3286 * future requests.
3288 if (num < max_write_zeroes) {
3289 qemu_vfree(iov.iov_base);
3290 iov.iov_base = NULL;
3294 sector_num += num;
3295 nb_sectors -= num;
3298 qemu_vfree(iov.iov_base);
3299 return ret;
3303 * Forwards an already correctly aligned write request to the BlockDriver.
3305 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3306 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3307 QEMUIOVector *qiov, int flags)
3309 BlockDriver *drv = bs->drv;
3310 bool waited;
3311 int ret;
3313 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3314 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3316 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3317 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3319 waited = wait_serialising_requests(req);
3320 assert(!waited || !req->serialising);
3321 assert(req->overlap_offset <= offset);
3322 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3324 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3326 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3327 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3328 qemu_iovec_is_zero(qiov)) {
3329 flags |= BDRV_REQ_ZERO_WRITE;
3330 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3331 flags |= BDRV_REQ_MAY_UNMAP;
3335 if (ret < 0) {
3336 /* Do nothing, write notifier decided to fail this request */
3337 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3338 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3339 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3340 } else {
3341 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3342 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3344 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3346 if (ret == 0 && !bs->enable_write_cache) {
3347 ret = bdrv_co_flush(bs);
3350 bdrv_set_dirty(bs, sector_num, nb_sectors);
3352 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3353 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3355 if (bs->growable && ret >= 0) {
3356 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3359 return ret;
3363 * Handle a write request in coroutine context
3365 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3366 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3367 BdrvRequestFlags flags)
3369 BdrvTrackedRequest req;
3370 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3371 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3372 uint8_t *head_buf = NULL;
3373 uint8_t *tail_buf = NULL;
3374 QEMUIOVector local_qiov;
3375 bool use_local_qiov = false;
3376 int ret;
3378 if (!bs->drv) {
3379 return -ENOMEDIUM;
3381 if (bs->read_only) {
3382 return -EACCES;
3384 if (bdrv_check_byte_request(bs, offset, bytes)) {
3385 return -EIO;
3388 /* throttling disk I/O */
3389 if (bs->io_limits_enabled) {
3390 bdrv_io_limits_intercept(bs, bytes, true);
3394 * Align write if necessary by performing a read-modify-write cycle.
3395 * Pad qiov with the read parts and be sure to have a tracked request not
3396 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3398 tracked_request_begin(&req, bs, offset, bytes, true);
3400 if (offset & (align - 1)) {
3401 QEMUIOVector head_qiov;
3402 struct iovec head_iov;
3404 mark_request_serialising(&req, align);
3405 wait_serialising_requests(&req);
3407 head_buf = qemu_blockalign(bs, align);
3408 head_iov = (struct iovec) {
3409 .iov_base = head_buf,
3410 .iov_len = align,
3412 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3414 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3415 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3416 align, &head_qiov, 0);
3417 if (ret < 0) {
3418 goto fail;
3420 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3422 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3423 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3424 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3425 use_local_qiov = true;
3427 bytes += offset & (align - 1);
3428 offset = offset & ~(align - 1);
3431 if ((offset + bytes) & (align - 1)) {
3432 QEMUIOVector tail_qiov;
3433 struct iovec tail_iov;
3434 size_t tail_bytes;
3435 bool waited;
3437 mark_request_serialising(&req, align);
3438 waited = wait_serialising_requests(&req);
3439 assert(!waited || !use_local_qiov);
3441 tail_buf = qemu_blockalign(bs, align);
3442 tail_iov = (struct iovec) {
3443 .iov_base = tail_buf,
3444 .iov_len = align,
3446 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3448 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3449 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3450 align, &tail_qiov, 0);
3451 if (ret < 0) {
3452 goto fail;
3454 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3456 if (!use_local_qiov) {
3457 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3458 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3459 use_local_qiov = true;
3462 tail_bytes = (offset + bytes) & (align - 1);
3463 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3465 bytes = ROUND_UP(bytes, align);
3468 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3469 use_local_qiov ? &local_qiov : qiov,
3470 flags);
3472 fail:
3473 tracked_request_end(&req);
3475 if (use_local_qiov) {
3476 qemu_iovec_destroy(&local_qiov);
3478 qemu_vfree(head_buf);
3479 qemu_vfree(tail_buf);
3481 return ret;
3484 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3485 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3486 BdrvRequestFlags flags)
3488 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3489 return -EINVAL;
3492 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3493 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3496 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3497 int nb_sectors, QEMUIOVector *qiov)
3499 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3501 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3504 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3505 int64_t sector_num, int nb_sectors,
3506 BdrvRequestFlags flags)
3508 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3510 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3511 flags &= ~BDRV_REQ_MAY_UNMAP;
3514 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3515 BDRV_REQ_ZERO_WRITE | flags);
3519 * Truncate file to 'offset' bytes (needed only for file protocols)
3521 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3523 BlockDriver *drv = bs->drv;
3524 int ret;
3525 if (!drv)
3526 return -ENOMEDIUM;
3527 if (!drv->bdrv_truncate)
3528 return -ENOTSUP;
3529 if (bs->read_only)
3530 return -EACCES;
3531 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
3532 return -EBUSY;
3534 ret = drv->bdrv_truncate(bs, offset);
3535 if (ret == 0) {
3536 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3537 bdrv_dev_resize_cb(bs);
3539 return ret;
3543 * Length of an allocated file in bytes. Sparse files are counted by actual
3544 * allocated space. Return < 0 if error or unknown.
3546 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3548 BlockDriver *drv = bs->drv;
3549 if (!drv) {
3550 return -ENOMEDIUM;
3552 if (drv->bdrv_get_allocated_file_size) {
3553 return drv->bdrv_get_allocated_file_size(bs);
3555 if (bs->file) {
3556 return bdrv_get_allocated_file_size(bs->file);
3558 return -ENOTSUP;
3562 * Length of a file in bytes. Return < 0 if error or unknown.
3564 int64_t bdrv_getlength(BlockDriverState *bs)
3566 BlockDriver *drv = bs->drv;
3567 if (!drv)
3568 return -ENOMEDIUM;
3570 if (drv->has_variable_length) {
3571 int ret = refresh_total_sectors(bs, bs->total_sectors);
3572 if (ret < 0) {
3573 return ret;
3576 return bs->total_sectors * BDRV_SECTOR_SIZE;
3579 /* return 0 as the number of sectors if no device is present or on error */
3580 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3582 int64_t length;
3583 length = bdrv_getlength(bs);
3584 if (length < 0)
3585 length = 0;
3586 else
3587 length = length >> BDRV_SECTOR_BITS;
3588 *nb_sectors_ptr = length;
3591 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3592 BlockdevOnError on_write_error)
3594 bs->on_read_error = on_read_error;
3595 bs->on_write_error = on_write_error;
3598 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3600 return is_read ? bs->on_read_error : bs->on_write_error;
3603 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3605 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3607 switch (on_err) {
3608 case BLOCKDEV_ON_ERROR_ENOSPC:
3609 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
3610 case BLOCKDEV_ON_ERROR_STOP:
3611 return BDRV_ACTION_STOP;
3612 case BLOCKDEV_ON_ERROR_REPORT:
3613 return BDRV_ACTION_REPORT;
3614 case BLOCKDEV_ON_ERROR_IGNORE:
3615 return BDRV_ACTION_IGNORE;
3616 default:
3617 abort();
3621 /* This is done by device models because, while the block layer knows
3622 * about the error, it does not know whether an operation comes from
3623 * the device or the block layer (from a job, for example).
3625 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3626 bool is_read, int error)
3628 assert(error >= 0);
3629 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
3630 if (action == BDRV_ACTION_STOP) {
3631 vm_stop(RUN_STATE_IO_ERROR);
3632 bdrv_iostatus_set_err(bs, error);
3636 int bdrv_is_read_only(BlockDriverState *bs)
3638 return bs->read_only;
3641 int bdrv_is_sg(BlockDriverState *bs)
3643 return bs->sg;
3646 int bdrv_enable_write_cache(BlockDriverState *bs)
3648 return bs->enable_write_cache;
3651 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3653 bs->enable_write_cache = wce;
3655 /* so a reopen() will preserve wce */
3656 if (wce) {
3657 bs->open_flags |= BDRV_O_CACHE_WB;
3658 } else {
3659 bs->open_flags &= ~BDRV_O_CACHE_WB;
3663 int bdrv_is_encrypted(BlockDriverState *bs)
3665 if (bs->backing_hd && bs->backing_hd->encrypted)
3666 return 1;
3667 return bs->encrypted;
3670 int bdrv_key_required(BlockDriverState *bs)
3672 BlockDriverState *backing_hd = bs->backing_hd;
3674 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3675 return 1;
3676 return (bs->encrypted && !bs->valid_key);
3679 int bdrv_set_key(BlockDriverState *bs, const char *key)
3681 int ret;
3682 if (bs->backing_hd && bs->backing_hd->encrypted) {
3683 ret = bdrv_set_key(bs->backing_hd, key);
3684 if (ret < 0)
3685 return ret;
3686 if (!bs->encrypted)
3687 return 0;
3689 if (!bs->encrypted) {
3690 return -EINVAL;
3691 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3692 return -ENOMEDIUM;
3694 ret = bs->drv->bdrv_set_key(bs, key);
3695 if (ret < 0) {
3696 bs->valid_key = 0;
3697 } else if (!bs->valid_key) {
3698 bs->valid_key = 1;
3699 /* call the change callback now, we skipped it on open */
3700 bdrv_dev_change_media_cb(bs, true);
3702 return ret;
3705 const char *bdrv_get_format_name(BlockDriverState *bs)
3707 return bs->drv ? bs->drv->format_name : NULL;
3710 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3711 void *opaque)
3713 BlockDriver *drv;
3714 int count = 0;
3715 const char **formats = NULL;
3717 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3718 if (drv->format_name) {
3719 bool found = false;
3720 int i = count;
3721 while (formats && i && !found) {
3722 found = !strcmp(formats[--i], drv->format_name);
3725 if (!found) {
3726 formats = g_realloc(formats, (count + 1) * sizeof(char *));
3727 formats[count++] = drv->format_name;
3728 it(opaque, drv->format_name);
3732 g_free(formats);
3735 /* Find the block backend BlockDriverState with the given device name */
3736 BlockDriverState *bdrv_find(const char *name)
3738 BlockDriverState *bs;
3740 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3741 if (!strcmp(name, bs->device_name)) {
3742 return bs;
3745 return NULL;
3748 /* Find a named node in the BDS graph */
3749 BlockDriverState *bdrv_find_node(const char *node_name)
3751 BlockDriverState *bs;
3753 assert(node_name);
3755 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3756 if (!strcmp(node_name, bs->node_name)) {
3757 return bs;
3760 return NULL;
3763 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3764 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3766 BlockDeviceInfoList *list, *entry;
3767 BlockDriverState *bs;
3769 list = NULL;
3770 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3771 entry = g_malloc0(sizeof(*entry));
3772 entry->value = bdrv_block_device_info(bs);
3773 entry->next = list;
3774 list = entry;
3777 return list;
3780 BlockDriverState *bdrv_lookup_bs(const char *device,
3781 const char *node_name,
3782 Error **errp)
3784 BlockDriverState *bs = NULL;
3786 if (device) {
3787 bs = bdrv_find(device);
3789 if (bs) {
3790 return bs;
3794 if (node_name) {
3795 bs = bdrv_find_node(node_name);
3797 if (bs) {
3798 return bs;
3802 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3803 device ? device : "",
3804 node_name ? node_name : "");
3805 return NULL;
3808 BlockDriverState *bdrv_next(BlockDriverState *bs)
3810 if (!bs) {
3811 return QTAILQ_FIRST(&bdrv_states);
3813 return QTAILQ_NEXT(bs, device_list);
3816 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3818 BlockDriverState *bs;
3820 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3821 it(opaque, bs);
3825 const char *bdrv_get_device_name(BlockDriverState *bs)
3827 return bs->device_name;
3830 int bdrv_get_flags(BlockDriverState *bs)
3832 return bs->open_flags;
3835 int bdrv_flush_all(void)
3837 BlockDriverState *bs;
3838 int result = 0;
3840 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3841 AioContext *aio_context = bdrv_get_aio_context(bs);
3842 int ret;
3844 aio_context_acquire(aio_context);
3845 ret = bdrv_flush(bs);
3846 if (ret < 0 && !result) {
3847 result = ret;
3849 aio_context_release(aio_context);
3852 return result;
3855 int bdrv_has_zero_init_1(BlockDriverState *bs)
3857 return 1;
3860 int bdrv_has_zero_init(BlockDriverState *bs)
3862 assert(bs->drv);
3864 /* If BS is a copy on write image, it is initialized to
3865 the contents of the base image, which may not be zeroes. */
3866 if (bs->backing_hd) {
3867 return 0;
3869 if (bs->drv->bdrv_has_zero_init) {
3870 return bs->drv->bdrv_has_zero_init(bs);
3873 /* safe default */
3874 return 0;
3877 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3879 BlockDriverInfo bdi;
3881 if (bs->backing_hd) {
3882 return false;
3885 if (bdrv_get_info(bs, &bdi) == 0) {
3886 return bdi.unallocated_blocks_are_zero;
3889 return false;
3892 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3894 BlockDriverInfo bdi;
3896 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3897 return false;
3900 if (bdrv_get_info(bs, &bdi) == 0) {
3901 return bdi.can_write_zeroes_with_unmap;
3904 return false;
3907 typedef struct BdrvCoGetBlockStatusData {
3908 BlockDriverState *bs;
3909 BlockDriverState *base;
3910 int64_t sector_num;
3911 int nb_sectors;
3912 int *pnum;
3913 int64_t ret;
3914 bool done;
3915 } BdrvCoGetBlockStatusData;
3918 * Returns true iff the specified sector is present in the disk image. Drivers
3919 * not implementing the functionality are assumed to not support backing files,
3920 * hence all their sectors are reported as allocated.
3922 * If 'sector_num' is beyond the end of the disk image the return value is 0
3923 * and 'pnum' is set to 0.
3925 * 'pnum' is set to the number of sectors (including and immediately following
3926 * the specified sector) that are known to be in the same
3927 * allocated/unallocated state.
3929 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3930 * beyond the end of the disk image it will be clamped.
3932 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3933 int64_t sector_num,
3934 int nb_sectors, int *pnum)
3936 int64_t length;
3937 int64_t n;
3938 int64_t ret, ret2;
3940 length = bdrv_getlength(bs);
3941 if (length < 0) {
3942 return length;
3945 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
3946 *pnum = 0;
3947 return 0;
3950 n = bs->total_sectors - sector_num;
3951 if (n < nb_sectors) {
3952 nb_sectors = n;
3955 if (!bs->drv->bdrv_co_get_block_status) {
3956 *pnum = nb_sectors;
3957 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
3958 if (bs->drv->protocol_name) {
3959 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3961 return ret;
3964 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3965 if (ret < 0) {
3966 *pnum = 0;
3967 return ret;
3970 if (ret & BDRV_BLOCK_RAW) {
3971 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3972 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3973 *pnum, pnum);
3976 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
3977 ret |= BDRV_BLOCK_ALLOCATED;
3980 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3981 if (bdrv_unallocated_blocks_are_zero(bs)) {
3982 ret |= BDRV_BLOCK_ZERO;
3983 } else if (bs->backing_hd) {
3984 BlockDriverState *bs2 = bs->backing_hd;
3985 int64_t length2 = bdrv_getlength(bs2);
3986 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3987 ret |= BDRV_BLOCK_ZERO;
3992 if (bs->file &&
3993 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3994 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3995 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3996 *pnum, pnum);
3997 if (ret2 >= 0) {
3998 /* Ignore errors. This is just providing extra information; it
3999 * is useful but not necessary.
4001 ret |= (ret2 & BDRV_BLOCK_ZERO);
4005 return ret;
4008 /* Coroutine wrapper for bdrv_get_block_status() */
4009 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4011 BdrvCoGetBlockStatusData *data = opaque;
4012 BlockDriverState *bs = data->bs;
4014 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4015 data->pnum);
4016 data->done = true;
4020 * Synchronous wrapper around bdrv_co_get_block_status().
4022 * See bdrv_co_get_block_status() for details.
4024 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4025 int nb_sectors, int *pnum)
4027 Coroutine *co;
4028 BdrvCoGetBlockStatusData data = {
4029 .bs = bs,
4030 .sector_num = sector_num,
4031 .nb_sectors = nb_sectors,
4032 .pnum = pnum,
4033 .done = false,
4036 if (qemu_in_coroutine()) {
4037 /* Fast-path if already in coroutine context */
4038 bdrv_get_block_status_co_entry(&data);
4039 } else {
4040 AioContext *aio_context = bdrv_get_aio_context(bs);
4042 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4043 qemu_coroutine_enter(co, &data);
4044 while (!data.done) {
4045 aio_poll(aio_context, true);
4048 return data.ret;
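/*
 * Usage sketch, illustrative only: walk an image and classify each extent,
 * roughly what "qemu-img map" does in a much more elaborate form.
 * example_dump_allocation is hypothetical.
 */
static void example_dump_allocation(BlockDriverState *bs)
{
    int64_t len = bdrv_getlength(bs);
    int64_t total, sector = 0;

    if (len < 0) {
        return;
    }
    total = len >> BDRV_SECTOR_BITS;

    while (sector < total) {
        int num;
        int64_t status = bdrv_get_block_status(bs, sector,
                                               MIN(total - sector, INT_MAX),
                                               &num);
        if (status < 0 || num == 0) {
            break;
        }
        fprintf(stderr, "%" PRId64 "+%d: %s%s\n", sector, num,
                (status & BDRV_BLOCK_ALLOCATED) ? "allocated" : "unallocated",
                (status & BDRV_BLOCK_ZERO) ? " reads-as-zero" : "");
        sector += num;
    }
}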
4051 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4052 int nb_sectors, int *pnum)
4054 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4055 if (ret < 0) {
4056 return ret;
4058 return (ret & BDRV_BLOCK_ALLOCATED);
4062 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4064 * Return true if the given sector is allocated in any image between
4065 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4066 * sector is allocated in any image of the chain. Return false otherwise.
4068 * 'pnum' is set to the number of sectors (including and immediately following
4069 * the specified sector) that are known to be in the same
4070 * allocated/unallocated state.
4073 int bdrv_is_allocated_above(BlockDriverState *top,
4074 BlockDriverState *base,
4075 int64_t sector_num,
4076 int nb_sectors, int *pnum)
4078 BlockDriverState *intermediate;
4079 int ret, n = nb_sectors;
4081 intermediate = top;
4082 while (intermediate && intermediate != base) {
4083 int pnum_inter;
4084 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4085 &pnum_inter);
4086 if (ret < 0) {
4087 return ret;
4088 } else if (ret) {
4089 *pnum = pnum_inter;
4090 return 1;
4094 * [sector_num, nb_sectors] is unallocated on top but intermediate
4095 * might have
4097 * [sector_num+x, nb_sectors] allocated.
4099 if (n > pnum_inter &&
4100 (intermediate == top ||
4101 sector_num + pnum_inter < intermediate->total_sectors)) {
4102 n = pnum_inter;
4105 intermediate = intermediate->backing_hd;
4108 *pnum = n;
4109 return 0;
4112 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4114 if (bs->backing_hd && bs->backing_hd->encrypted)
4115 return bs->backing_file;
4116 else if (bs->encrypted)
4117 return bs->filename;
4118 else
4119 return NULL;
4122 void bdrv_get_backing_filename(BlockDriverState *bs,
4123 char *filename, int filename_size)
4125 pstrcpy(filename, filename_size, bs->backing_file);
4128 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4129 const uint8_t *buf, int nb_sectors)
4131 BlockDriver *drv = bs->drv;
4132 if (!drv)
4133 return -ENOMEDIUM;
4134 if (!drv->bdrv_write_compressed)
4135 return -ENOTSUP;
4136 if (bdrv_check_request(bs, sector_num, nb_sectors))
4137 return -EIO;
4139 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4141 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4144 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4146 BlockDriver *drv = bs->drv;
4147 if (!drv)
4148 return -ENOMEDIUM;
4149 if (!drv->bdrv_get_info)
4150 return -ENOTSUP;
4151 memset(bdi, 0, sizeof(*bdi));
4152 return drv->bdrv_get_info(bs, bdi);
4155 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4157 BlockDriver *drv = bs->drv;
4158 if (drv && drv->bdrv_get_specific_info) {
4159 return drv->bdrv_get_specific_info(bs);
4161 return NULL;
4164 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4165 int64_t pos, int size)
4167 QEMUIOVector qiov;
4168 struct iovec iov = {
4169 .iov_base = (void *) buf,
4170 .iov_len = size,
4173 qemu_iovec_init_external(&qiov, &iov, 1);
4174 return bdrv_writev_vmstate(bs, &qiov, pos);
4177 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4179 BlockDriver *drv = bs->drv;
4181 if (!drv) {
4182 return -ENOMEDIUM;
4183 } else if (drv->bdrv_save_vmstate) {
4184 return drv->bdrv_save_vmstate(bs, qiov, pos);
4185 } else if (bs->file) {
4186 return bdrv_writev_vmstate(bs->file, qiov, pos);
4189 return -ENOTSUP;
4192 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4193 int64_t pos, int size)
4195 BlockDriver *drv = bs->drv;
4196 if (!drv)
4197 return -ENOMEDIUM;
4198 if (drv->bdrv_load_vmstate)
4199 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4200 if (bs->file)
4201 return bdrv_load_vmstate(bs->file, buf, pos, size);
4202 return -ENOTSUP;
4205 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4207 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4208 return;
4211 bs->drv->bdrv_debug_event(bs, event);
4214 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4215 const char *tag)
4217 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4218 bs = bs->file;
4221 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4222 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4225 return -ENOTSUP;
4228 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4230 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4231 bs = bs->file;
4234 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4235 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4238 return -ENOTSUP;
4241 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4243 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4244 bs = bs->file;
4247 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4248 return bs->drv->bdrv_debug_resume(bs, tag);
4251 return -ENOTSUP;
4254 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4256 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4257 bs = bs->file;
4260 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4261 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4264 return false;
4267 int bdrv_is_snapshot(BlockDriverState *bs)
4269 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4272 /* backing_file can either be relative, or absolute, or a protocol. If it is
4273 * relative, it must be relative to the chain. So, passing in bs->filename
4274 * from a BDS as backing_file should not be done, as that may be relative to
4275 * the CWD rather than the chain. */
4276 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4277 const char *backing_file)
4279 char *filename_full = NULL;
4280 char *backing_file_full = NULL;
4281 char *filename_tmp = NULL;
4282 int is_protocol = 0;
4283 BlockDriverState *curr_bs = NULL;
4284 BlockDriverState *retval = NULL;
4286 if (!bs || !bs->drv || !backing_file) {
4287 return NULL;
4290 filename_full = g_malloc(PATH_MAX);
4291 backing_file_full = g_malloc(PATH_MAX);
4292 filename_tmp = g_malloc(PATH_MAX);
4294 is_protocol = path_has_protocol(backing_file);
4296 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4298 /* If either of the filename paths is actually a protocol, then
4299 * compare unmodified paths; otherwise make paths relative */
4300 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4301 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4302 retval = curr_bs->backing_hd;
4303 break;
4305 } else {
4306 /* If not an absolute filename path, make it relative to the current
4307 * image's filename path */
4308 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4309 backing_file);
4311 /* We are going to compare absolute pathnames */
4312 if (!realpath(filename_tmp, filename_full)) {
4313 continue;
4316 /* We need to make sure the backing filename we are comparing against
4317 * is relative to the current image filename (or absolute) */
4318 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4319 curr_bs->backing_file);
4321 if (!realpath(filename_tmp, backing_file_full)) {
4322 continue;
4325 if (strcmp(backing_file_full, filename_full) == 0) {
4326 retval = curr_bs->backing_hd;
4327 break;
4332 g_free(filename_full);
4333 g_free(backing_file_full);
4334 g_free(filename_tmp);
4335 return retval;
4338 int bdrv_get_backing_file_depth(BlockDriverState *bs)
4340 if (!bs->drv) {
4341 return 0;
4344 if (!bs->backing_hd) {
4345 return 0;
4348 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4351 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
4353 BlockDriverState *curr_bs = NULL;
4355 if (!bs) {
4356 return NULL;
4359 curr_bs = bs;
4361 while (curr_bs->backing_hd) {
4362 curr_bs = curr_bs->backing_hd;
4364 return curr_bs;
4367 /**************************************************************/
4368 /* async I/Os */
4370 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4371 QEMUIOVector *qiov, int nb_sectors,
4372 BlockDriverCompletionFunc *cb, void *opaque)
4374 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4376 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4377 cb, opaque, false);
4380 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4381 QEMUIOVector *qiov, int nb_sectors,
4382 BlockDriverCompletionFunc *cb, void *opaque)
4384 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4386 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4387 cb, opaque, true);
4390 BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4391 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4392 BlockDriverCompletionFunc *cb, void *opaque)
4394 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4396 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4397 BDRV_REQ_ZERO_WRITE | flags,
4398 cb, opaque, true);
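/*
 * Editorial sketch (not part of the original source): a hypothetical caller of
 * bdrv_aio_readv(). The callback, buffer size and sector numbers are invented
 * for illustration; a real caller must keep the buffer and the QEMUIOVector
 * alive until the completion callback has run.
 */
static void example_read_done(void *opaque, int ret)
{
    /* ret is 0 on success or a negative errno value */
    *(int *)opaque = ret;
}

static void example_submit_read(BlockDriverState *bs, int *status)
{
    static uint8_t buf[4096];          /* 8 sectors of 512 bytes */
    static struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    static QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Read 8 sectors starting at sector 0; example_read_done() is invoked
     * from the event loop once the request completes. */
    bdrv_aio_readv(bs, 0, &qiov, 8, example_read_done, status);
}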
4402 typedef struct MultiwriteCB {
4403 int error;
4404 int num_requests;
4405 int num_callbacks;
4406 struct {
4407 BlockDriverCompletionFunc *cb;
4408 void *opaque;
4409 QEMUIOVector *free_qiov;
4410 } callbacks[];
4411 } MultiwriteCB;
4413 static void multiwrite_user_cb(MultiwriteCB *mcb)
4415 int i;
4417 for (i = 0; i < mcb->num_callbacks; i++) {
4418 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4419 if (mcb->callbacks[i].free_qiov) {
4420 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4422 g_free(mcb->callbacks[i].free_qiov);
4426 static void multiwrite_cb(void *opaque, int ret)
4428 MultiwriteCB *mcb = opaque;
4430 trace_multiwrite_cb(mcb, ret);
4432 if (ret < 0 && !mcb->error) {
4433 mcb->error = ret;
4436 mcb->num_requests--;
4437 if (mcb->num_requests == 0) {
4438 multiwrite_user_cb(mcb);
4439 g_free(mcb);
4443 static int multiwrite_req_compare(const void *a, const void *b)
4445 const BlockRequest *req1 = a, *req2 = b;
4448 * Note that we can't simply subtract req2->sector from req1->sector
4449 * here as that could overflow the return value.
4451 if (req1->sector > req2->sector) {
4452 return 1;
4453 } else if (req1->sector < req2->sector) {
4454 return -1;
4455 } else {
4456 return 0;
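/*
 * Editorial note (not part of the original source): a concrete example of the
 * overflow the comparator above avoids. Returning the raw difference,
 * truncated to the function's int return type, would misorder large sector
 * numbers, e.g.:
 *
 *     req1->sector = 0x100000000LL;                // 2^32
 *     req2->sector = 0;
 *     (int)(req1->sector - req2->sector) == 0      // truncated: "equal"
 *
 * whereas the explicit comparisons correctly return 1.
 */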
4461 * Takes a bunch of requests and tries to merge them. Returns the number of
4462 * requests that remain after merging.
4464 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4465 int num_reqs, MultiwriteCB *mcb)
4467 int i, outidx;
4469 // Sort requests by start sector
4470 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4472 // Check if adjacent requests touch the same clusters. If so, combine them,
4473 // filling up gaps with zero sectors.
4474 outidx = 0;
4475 for (i = 1; i < num_reqs; i++) {
4476 int merge = 0;
4477 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4479 // Handle exactly sequential writes and overlapping writes.
4480 if (reqs[i].sector <= oldreq_last) {
4481 merge = 1;
4484 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4485 merge = 0;
4488 if (merge) {
4489 size_t size;
4490 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4491 qemu_iovec_init(qiov,
4492 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4494 // Add the first request to the merged one. If the requests are
4495 // overlapping, drop the last sectors of the first request.
4496 size = (reqs[i].sector - reqs[outidx].sector) << 9;
4497 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4499 // We shouldn't need to add any zeros between the two requests
4500 assert(reqs[i].sector <= oldreq_last);
4502 // Add the second request
4503 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4505 reqs[outidx].nb_sectors = qiov->size >> 9;
4506 reqs[outidx].qiov = qiov;
4508 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4509 } else {
4510 outidx++;
4511 reqs[outidx].sector = reqs[i].sector;
4512 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4513 reqs[outidx].qiov = reqs[i].qiov;
4517 return outidx + 1;
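/*
 * Editorial note (not part of the original source): a worked example of the
 * merge above. Given two sorted requests
 *
 *     reqs[0]: sector 0, nb_sectors 8   (qiov size 8 * 512 = 4096 bytes)
 *     reqs[1]: sector 8, nb_sectors 4   (qiov size 4 * 512 = 2048 bytes)
 *
 * reqs[1].sector (8) <= oldreq_last (0 + 8), so they are merged: the new qiov
 * concatenates (8 - 0) * 512 = 4096 bytes from reqs[0] with all 2048 bytes of
 * reqs[1], and reqs[0] becomes sector 0, nb_sectors 12. multiwrite_merge()
 * then returns 1 instead of 2.
 */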
4521 * Submit multiple AIO write requests at once.
4523 * On success, the function returns 0 and all requests in the reqs array have
4524  * been submitted. In the error case this function returns -1, and any of the
4525  * requests may or may not have been submitted yet. In particular, this means
4526  * that the callback will be called for some of the requests and not for
4527  * others. The caller must check the error field of the BlockRequest to wait
4528  * for the right callbacks (if error != 0, no callback will be called).
4530 * The implementation may modify the contents of the reqs array, e.g. to merge
4531 * requests. However, the fields opaque and error are left unmodified as they
4532 * are used to signal failure for a single request to the caller.
4534 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4536 MultiwriteCB *mcb;
4537 int i;
4539 /* don't submit writes if we don't have a medium */
4540 if (bs->drv == NULL) {
4541 for (i = 0; i < num_reqs; i++) {
4542 reqs[i].error = -ENOMEDIUM;
4544 return -1;
4547 if (num_reqs == 0) {
4548 return 0;
4551 // Create MultiwriteCB structure
4552 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4553 mcb->num_requests = 0;
4554 mcb->num_callbacks = num_reqs;
4556 for (i = 0; i < num_reqs; i++) {
4557 mcb->callbacks[i].cb = reqs[i].cb;
4558 mcb->callbacks[i].opaque = reqs[i].opaque;
4561 // Check for mergable requests
4562 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4564 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4566 /* Run the aio requests. */
4567 mcb->num_requests = num_reqs;
4568 for (i = 0; i < num_reqs; i++) {
4569 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4570 reqs[i].nb_sectors, reqs[i].flags,
4571 multiwrite_cb, mcb,
4572 true);
4575 return 0;
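/*
 * Editorial sketch (not part of the original source): a hypothetical caller of
 * bdrv_aio_multiwrite(). The callback, buffers and sector layout are invented
 * for illustration; each request carries its own completion callback, which
 * fires once per request even when requests are merged internally.
 */
static void example_multiwrite_done(void *opaque, int ret)
{
    /* ret < 0 signals failure of the (possibly merged) underlying write */
}

static int example_submit_two_writes(BlockDriverState *bs,
                                     QEMUIOVector *qiov_a, QEMUIOVector *qiov_b)
{
    /* qiov_a and qiov_b are assumed to each hold a multiple of 512 bytes */
    BlockRequest reqs[2] = {
        {
            .sector     = 0,
            .nb_sectors = qiov_a->size >> 9,
            .qiov       = qiov_a,
            .cb         = example_multiwrite_done,
        },
        {
            .sector     = qiov_a->size >> 9,   /* directly after the first */
            .nb_sectors = qiov_b->size >> 9,
            .qiov       = qiov_b,
            .cb         = example_multiwrite_done,
        },
    };

    /* Returns 0 if all requests were submitted, -1 otherwise (see the
     * contract described in the comment above bdrv_aio_multiwrite). */
    return bdrv_aio_multiwrite(bs, reqs, 2);
}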
4578 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
4580 acb->aiocb_info->cancel(acb);
4583 /**************************************************************/
4584 /* async block device emulation */
4586 typedef struct BlockDriverAIOCBSync {
4587 BlockDriverAIOCB common;
4588 QEMUBH *bh;
4589 int ret;
4590 /* vector translation state */
4591 QEMUIOVector *qiov;
4592 uint8_t *bounce;
4593 int is_write;
4594 } BlockDriverAIOCBSync;
4596 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
4598 BlockDriverAIOCBSync *acb =
4599 container_of(blockacb, BlockDriverAIOCBSync, common);
4600 qemu_bh_delete(acb->bh);
4601 acb->bh = NULL;
4602 qemu_aio_release(acb);
4605 static const AIOCBInfo bdrv_em_aiocb_info = {
4606 .aiocb_size = sizeof(BlockDriverAIOCBSync),
4607 .cancel = bdrv_aio_cancel_em,
4610 static void bdrv_aio_bh_cb(void *opaque)
4612 BlockDriverAIOCBSync *acb = opaque;
4614 if (!acb->is_write)
4615 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4616 qemu_vfree(acb->bounce);
4617 acb->common.cb(acb->common.opaque, acb->ret);
4618 qemu_bh_delete(acb->bh);
4619 acb->bh = NULL;
4620 qemu_aio_release(acb);
4623 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4624 int64_t sector_num,
4625 QEMUIOVector *qiov,
4626 int nb_sectors,
4627 BlockDriverCompletionFunc *cb,
4628 void *opaque,
4629 int is_write)
4632 BlockDriverAIOCBSync *acb;
4634 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4635 acb->is_write = is_write;
4636 acb->qiov = qiov;
4637 acb->bounce = qemu_blockalign(bs, qiov->size);
4638 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
4640 if (is_write) {
4641 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4642 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4643 } else {
4644 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4647 qemu_bh_schedule(acb->bh);
4649 return &acb->common;
4652 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4653 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4654 BlockDriverCompletionFunc *cb, void *opaque)
4656 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4659 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4660 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4661 BlockDriverCompletionFunc *cb, void *opaque)
4663 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4667 typedef struct BlockDriverAIOCBCoroutine {
4668 BlockDriverAIOCB common;
4669 BlockRequest req;
4670 bool is_write;
4671 bool *done;
4672 QEMUBH* bh;
4673 } BlockDriverAIOCBCoroutine;
4675 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
4677 AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
4678 BlockDriverAIOCBCoroutine *acb =
4679 container_of(blockacb, BlockDriverAIOCBCoroutine, common);
4680 bool done = false;
4682 acb->done = &done;
4683 while (!done) {
4684 aio_poll(aio_context, true);
4688 static const AIOCBInfo bdrv_em_co_aiocb_info = {
4689 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
4690 .cancel = bdrv_aio_co_cancel_em,
4693 static void bdrv_co_em_bh(void *opaque)
4695 BlockDriverAIOCBCoroutine *acb = opaque;
4697 acb->common.cb(acb->common.opaque, acb->req.error);
4699 if (acb->done) {
4700 *acb->done = true;
4703 qemu_bh_delete(acb->bh);
4704 qemu_aio_release(acb);
4707 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4708 static void coroutine_fn bdrv_co_do_rw(void *opaque)
4710 BlockDriverAIOCBCoroutine *acb = opaque;
4711 BlockDriverState *bs = acb->common.bs;
4713 if (!acb->is_write) {
4714 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4715 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4716 } else {
4717 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4718 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4721 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4722 qemu_bh_schedule(acb->bh);
4725 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4726 int64_t sector_num,
4727 QEMUIOVector *qiov,
4728 int nb_sectors,
4729 BdrvRequestFlags flags,
4730 BlockDriverCompletionFunc *cb,
4731 void *opaque,
4732 bool is_write)
4734 Coroutine *co;
4735 BlockDriverAIOCBCoroutine *acb;
4737 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4738 acb->req.sector = sector_num;
4739 acb->req.nb_sectors = nb_sectors;
4740 acb->req.qiov = qiov;
4741 acb->req.flags = flags;
4742 acb->is_write = is_write;
4743 acb->done = NULL;
4745 co = qemu_coroutine_create(bdrv_co_do_rw);
4746 qemu_coroutine_enter(co, acb);
4748 return &acb->common;
4751 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4753 BlockDriverAIOCBCoroutine *acb = opaque;
4754 BlockDriverState *bs = acb->common.bs;
4756 acb->req.error = bdrv_co_flush(bs);
4757 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4758 qemu_bh_schedule(acb->bh);
4761 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4762 BlockDriverCompletionFunc *cb, void *opaque)
4764 trace_bdrv_aio_flush(bs, opaque);
4766 Coroutine *co;
4767 BlockDriverAIOCBCoroutine *acb;
4769 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4770 acb->done = NULL;
4772 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4773 qemu_coroutine_enter(co, acb);
4775 return &acb->common;
4778 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4780 BlockDriverAIOCBCoroutine *acb = opaque;
4781 BlockDriverState *bs = acb->common.bs;
4783 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4784 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4785 qemu_bh_schedule(acb->bh);
4788 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4789 int64_t sector_num, int nb_sectors,
4790 BlockDriverCompletionFunc *cb, void *opaque)
4792 Coroutine *co;
4793 BlockDriverAIOCBCoroutine *acb;
4795 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4797 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4798 acb->req.sector = sector_num;
4799 acb->req.nb_sectors = nb_sectors;
4800 acb->done = NULL;
4801 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4802 qemu_coroutine_enter(co, acb);
4804 return &acb->common;
4807 void bdrv_init(void)
4809 module_call_init(MODULE_INIT_BLOCK);
4812 void bdrv_init_with_whitelist(void)
4814 use_bdrv_whitelist = 1;
4815 bdrv_init();
4818 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
4819 BlockDriverCompletionFunc *cb, void *opaque)
4821 BlockDriverAIOCB *acb;
4823 acb = g_slice_alloc(aiocb_info->aiocb_size);
4824 acb->aiocb_info = aiocb_info;
4825 acb->bs = bs;
4826 acb->cb = cb;
4827 acb->opaque = opaque;
4828 return acb;
4831 void qemu_aio_release(void *p)
4833 BlockDriverAIOCB *acb = p;
4834 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4837 /**************************************************************/
4838 /* Coroutine block device emulation */
4840 typedef struct CoroutineIOCompletion {
4841 Coroutine *coroutine;
4842 int ret;
4843 } CoroutineIOCompletion;
4845 static void bdrv_co_io_em_complete(void *opaque, int ret)
4847 CoroutineIOCompletion *co = opaque;
4849 co->ret = ret;
4850 qemu_coroutine_enter(co->coroutine, NULL);
4853 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4854 int nb_sectors, QEMUIOVector *iov,
4855 bool is_write)
4857 CoroutineIOCompletion co = {
4858 .coroutine = qemu_coroutine_self(),
4860 BlockDriverAIOCB *acb;
4862 if (is_write) {
4863 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4864 bdrv_co_io_em_complete, &co);
4865 } else {
4866 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4867 bdrv_co_io_em_complete, &co);
4870 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
4871 if (!acb) {
4872 return -EIO;
4874 qemu_coroutine_yield();
4876 return co.ret;
4879 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4880 int64_t sector_num, int nb_sectors,
4881 QEMUIOVector *iov)
4883 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4886 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4887 int64_t sector_num, int nb_sectors,
4888 QEMUIOVector *iov)
4890 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4893 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
4895 RwCo *rwco = opaque;
4897 rwco->ret = bdrv_co_flush(rwco->bs);
4900 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4902 int ret;
4904 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
4905 return 0;
4908 /* Write back cached data to the OS even with cache=unsafe */
4909 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
4910 if (bs->drv->bdrv_co_flush_to_os) {
4911 ret = bs->drv->bdrv_co_flush_to_os(bs);
4912 if (ret < 0) {
4913 return ret;
4917 /* But don't actually force it to the disk with cache=unsafe */
4918 if (bs->open_flags & BDRV_O_NO_FLUSH) {
4919 goto flush_parent;
4922 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
4923 if (bs->drv->bdrv_co_flush_to_disk) {
4924 ret = bs->drv->bdrv_co_flush_to_disk(bs);
4925 } else if (bs->drv->bdrv_aio_flush) {
4926 BlockDriverAIOCB *acb;
4927 CoroutineIOCompletion co = {
4928 .coroutine = qemu_coroutine_self(),
4931 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4932 if (acb == NULL) {
4933 ret = -EIO;
4934 } else {
4935 qemu_coroutine_yield();
4936 ret = co.ret;
4938 } else {
4940 * Some block drivers always operate in either writethrough or unsafe
4941  * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4942 * know how the server works (because the behaviour is hardcoded or
4943 * depends on server-side configuration), so we can't ensure that
4944 * everything is safe on disk. Returning an error doesn't work because
4945 * that would break guests even if the server operates in writethrough
4946 * mode.
4948  * Let's hope the user knows what they're doing.
4950 ret = 0;
4952 if (ret < 0) {
4953 return ret;
4956 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
4957 * in the case of cache=unsafe, so there are no useless flushes.
4959 flush_parent:
4960 return bdrv_co_flush(bs->file);
4963 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
4965 Error *local_err = NULL;
4966 int ret;
4968 if (!bs->drv) {
4969 return;
4972 if (bs->drv->bdrv_invalidate_cache) {
4973 bs->drv->bdrv_invalidate_cache(bs, &local_err);
4974 } else if (bs->file) {
4975 bdrv_invalidate_cache(bs->file, &local_err);
4977 if (local_err) {
4978 error_propagate(errp, local_err);
4979 return;
4982 ret = refresh_total_sectors(bs, bs->total_sectors);
4983 if (ret < 0) {
4984 error_setg_errno(errp, -ret, "Could not refresh total sector count");
4985 return;
4989 void bdrv_invalidate_cache_all(Error **errp)
4991 BlockDriverState *bs;
4992 Error *local_err = NULL;
4994 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
4995 AioContext *aio_context = bdrv_get_aio_context(bs);
4997 aio_context_acquire(aio_context);
4998 bdrv_invalidate_cache(bs, &local_err);
4999 aio_context_release(aio_context);
5000 if (local_err) {
5001 error_propagate(errp, local_err);
5002 return;
5007 void bdrv_clear_incoming_migration_all(void)
5009 BlockDriverState *bs;
5011 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5012 AioContext *aio_context = bdrv_get_aio_context(bs);
5014 aio_context_acquire(aio_context);
5015 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
5016 aio_context_release(aio_context);
5020 int bdrv_flush(BlockDriverState *bs)
5022 Coroutine *co;
5023 RwCo rwco = {
5024 .bs = bs,
5025 .ret = NOT_DONE,
5028 if (qemu_in_coroutine()) {
5029 /* Fast-path if already in coroutine context */
5030 bdrv_flush_co_entry(&rwco);
5031 } else {
5032 AioContext *aio_context = bdrv_get_aio_context(bs);
5034 co = qemu_coroutine_create(bdrv_flush_co_entry);
5035 qemu_coroutine_enter(co, &rwco);
5036 while (rwco.ret == NOT_DONE) {
5037 aio_poll(aio_context, true);
5041 return rwco.ret;
5044 typedef struct DiscardCo {
5045 BlockDriverState *bs;
5046 int64_t sector_num;
5047 int nb_sectors;
5048 int ret;
5049 } DiscardCo;
5050 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5052 DiscardCo *rwco = opaque;
5054 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5057 /* If no limit is specified in the BlockLimits, use a default
5058 * of 32768 512-byte sectors (16 MiB) per request.
5060 #define MAX_DISCARD_DEFAULT 32768
5062 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5063 int nb_sectors)
5065 int max_discard;
5067 if (!bs->drv) {
5068 return -ENOMEDIUM;
5069 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5070 return -EIO;
5071 } else if (bs->read_only) {
5072 return -EROFS;
5075 bdrv_reset_dirty(bs, sector_num, nb_sectors);
5077 /* Do nothing if disabled. */
5078 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5079 return 0;
5082 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5083 return 0;
5086 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5087 while (nb_sectors > 0) {
5088 int ret;
5089 int num = nb_sectors;
5091 /* align request */
5092 if (bs->bl.discard_alignment &&
5093 num >= bs->bl.discard_alignment &&
5094 sector_num % bs->bl.discard_alignment) {
5095 if (num > bs->bl.discard_alignment) {
5096 num = bs->bl.discard_alignment;
5098 num -= sector_num % bs->bl.discard_alignment;
5101 /* limit request size */
5102 if (num > max_discard) {
5103 num = max_discard;
5106 if (bs->drv->bdrv_co_discard) {
5107 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5108 } else {
5109 BlockDriverAIOCB *acb;
5110 CoroutineIOCompletion co = {
5111 .coroutine = qemu_coroutine_self(),
5114 acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
5115 bdrv_co_io_em_complete, &co);
5116 if (acb == NULL) {
5117 return -EIO;
5118 } else {
5119 qemu_coroutine_yield();
5120 ret = co.ret;
5123 if (ret && ret != -ENOTSUP) {
5124 return ret;
5127 sector_num += num;
5128 nb_sectors -= num;
5130 return 0;
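/*
 * Editorial note (not part of the original source): a worked example of the
 * request splitting above, assuming bs->bl.discard_alignment == 8 and
 * bs->bl.max_discard == 0 (so max_discard falls back to 32768).
 *
 * For a discard of 20 sectors starting at sector 5:
 *   - 1st iteration: num starts at 20, is capped to the alignment (8) and then
 *     reduced by 5 % 8 = 5, so 3 sectors [5, 8) are discarded; 17 remain.
 *   - 2nd iteration: sector_num (8) is now aligned, so num stays 17 and the
 *     sectors [8, 25) are discarded in a single request.
 */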
5133 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5135 Coroutine *co;
5136 DiscardCo rwco = {
5137 .bs = bs,
5138 .sector_num = sector_num,
5139 .nb_sectors = nb_sectors,
5140 .ret = NOT_DONE,
5143 if (qemu_in_coroutine()) {
5144 /* Fast-path if already in coroutine context */
5145 bdrv_discard_co_entry(&rwco);
5146 } else {
5147 AioContext *aio_context = bdrv_get_aio_context(bs);
5149 co = qemu_coroutine_create(bdrv_discard_co_entry);
5150 qemu_coroutine_enter(co, &rwco);
5151 while (rwco.ret == NOT_DONE) {
5152 aio_poll(aio_context, true);
5156 return rwco.ret;
5159 /**************************************************************/
5160 /* removable device support */
5163 * Return TRUE if the media is present
5165 int bdrv_is_inserted(BlockDriverState *bs)
5167 BlockDriver *drv = bs->drv;
5169 if (!drv)
5170 return 0;
5171 if (!drv->bdrv_is_inserted)
5172 return 1;
5173 return drv->bdrv_is_inserted(bs);
5177 * Return whether the media changed since the last call to this
5178 * function, or -ENOTSUP if we don't know. Most drivers don't know.
5180 int bdrv_media_changed(BlockDriverState *bs)
5182 BlockDriver *drv = bs->drv;
5184 if (drv && drv->bdrv_media_changed) {
5185 return drv->bdrv_media_changed(bs);
5187 return -ENOTSUP;
5191 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5193 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
5195 BlockDriver *drv = bs->drv;
5197 if (drv && drv->bdrv_eject) {
5198 drv->bdrv_eject(bs, eject_flag);
5201 if (bs->device_name[0] != '\0') {
5202 bdrv_emit_qmp_eject_event(bs, eject_flag);
5207 * Lock or unlock the media (if it is locked, the user won't be able
5208 * to eject it manually).
5210 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
5212 BlockDriver *drv = bs->drv;
5214 trace_bdrv_lock_medium(bs, locked);
5216 if (drv && drv->bdrv_lock_medium) {
5217 drv->bdrv_lock_medium(bs, locked);
5221 /* needed for generic scsi interface */
5223 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5225 BlockDriver *drv = bs->drv;
5227 if (drv && drv->bdrv_ioctl)
5228 return drv->bdrv_ioctl(bs, req, buf);
5229 return -ENOTSUP;
5232 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5233 unsigned long int req, void *buf,
5234 BlockDriverCompletionFunc *cb, void *opaque)
5236 BlockDriver *drv = bs->drv;
5238 if (drv && drv->bdrv_aio_ioctl)
5239 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5240 return NULL;
5243 void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5245 bs->guest_block_size = align;
5248 void *qemu_blockalign(BlockDriverState *bs, size_t size)
5250 return qemu_memalign(bdrv_opt_mem_align(bs), size);
5254  * Check if all memory in this vector is aligned as required by the driver (see bdrv_opt_mem_align).
5256 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5258 int i;
5259 size_t alignment = bdrv_opt_mem_align(bs);
5261 for (i = 0; i < qiov->niov; i++) {
5262 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5263 return false;
5265 if (qiov->iov[i].iov_len % alignment) {
5266 return false;
5270 return true;
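/*
 * Editorial sketch (not part of the original source): building a QEMUIOVector
 * that satisfies bdrv_qiov_is_aligned(). The helper name and the single-entry
 * layout are invented; qemu_blockalign() (above) allocates with
 * bdrv_opt_mem_align(), so the base address is suitably aligned.
 */
static bool example_build_aligned_qiov(BlockDriverState *bs, QEMUIOVector *qiov,
                                       size_t len)
{
    void *buf = qemu_blockalign(bs, len);

    qemu_iovec_init(qiov, 1);
    qemu_iovec_add(qiov, buf, len);

    /* True as long as len itself is a multiple of bdrv_opt_mem_align(bs);
     * the caller still owns buf and must qemu_vfree() it later. */
    return bdrv_qiov_is_aligned(bs, qiov);
}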
5273 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5274 Error **errp)
5276 int64_t bitmap_size;
5277 BdrvDirtyBitmap *bitmap;
5279 assert((granularity & (granularity - 1)) == 0);
5281 granularity >>= BDRV_SECTOR_BITS;
5282 assert(granularity);
5283 bitmap_size = bdrv_getlength(bs);
5284 if (bitmap_size < 0) {
5285 error_setg_errno(errp, -bitmap_size, "could not get length of device");
5286 errno = -bitmap_size;
5287 return NULL;
5289 bitmap_size >>= BDRV_SECTOR_BITS;
5290 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
5291 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5292 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5293 return bitmap;
5296 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5298 BdrvDirtyBitmap *bm, *next;
5299 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5300 if (bm == bitmap) {
5301 QLIST_REMOVE(bitmap, list);
5302 hbitmap_free(bitmap->bitmap);
5303 g_free(bitmap);
5304 return;
5309 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5311 BdrvDirtyBitmap *bm;
5312 BlockDirtyInfoList *list = NULL;
5313 BlockDirtyInfoList **plist = &list;
5315 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5316 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
5317 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
5318 info->count = bdrv_get_dirty_count(bs, bm);
5319 info->granularity =
5320 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5321 entry->value = info;
5322 *plist = entry;
5323 plist = &entry->next;
5326 return list;
5329 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5331 if (bitmap) {
5332 return hbitmap_get(bitmap->bitmap, sector);
5333 } else {
5334 return 0;
5338 void bdrv_dirty_iter_init(BlockDriverState *bs,
5339 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5341 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5344 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5345 int nr_sectors)
5347 BdrvDirtyBitmap *bitmap;
5348 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5349 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5353 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5355 BdrvDirtyBitmap *bitmap;
5356 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5357 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5361 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5363 return hbitmap_count(bitmap->bitmap);
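/*
 * Editorial sketch (not part of the original source): the life cycle of a
 * dirty bitmap using the helpers above. The 64 KiB granularity and the helper
 * name are invented, and the example assumes bs is at least 64 KiB long.
 */
static void example_dirty_bitmap_roundtrip(BlockDriverState *bs, Error **errp)
{
    BdrvDirtyBitmap *bitmap;

    /* Granularity is given in bytes and must be a power of two. */
    bitmap = bdrv_create_dirty_bitmap(bs, 65536, errp);
    if (!bitmap) {
        return;
    }

    /* Mark sectors [0, 128) dirty, as the write path does internally ... */
    bdrv_set_dirty(bs, 0, 128);

    /* ... and observe them: one 64 KiB chunk covers 128 sectors. */
    assert(bdrv_get_dirty(bs, bitmap, 0));
    assert(bdrv_get_dirty_count(bs, bitmap) == 128);

    bdrv_release_dirty_bitmap(bs, bitmap);
}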
5366 /* Get a reference to bs */
5367 void bdrv_ref(BlockDriverState *bs)
5369 bs->refcnt++;
5372 /* Release a previously grabbed reference to bs.
5373 * If after releasing, reference count is zero, the BlockDriverState is
5374 * deleted. */
5375 void bdrv_unref(BlockDriverState *bs)
5377 assert(bs->refcnt > 0);
5378 if (--bs->refcnt == 0) {
5379 bdrv_delete(bs);
5383 struct BdrvOpBlocker {
5384 Error *reason;
5385 QLIST_ENTRY(BdrvOpBlocker) list;
5388 bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5390 BdrvOpBlocker *blocker;
5391 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5392 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5393 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5394 if (errp) {
5395 error_setg(errp, "Device '%s' is busy: %s",
5396 bs->device_name, error_get_pretty(blocker->reason));
5398 return true;
5400 return false;
5403 void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5405 BdrvOpBlocker *blocker;
5406 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5408 blocker = g_malloc0(sizeof(BdrvOpBlocker));
5409 blocker->reason = reason;
5410 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5413 void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5415 BdrvOpBlocker *blocker, *next;
5416 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5417 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5418 if (blocker->reason == reason) {
5419 QLIST_REMOVE(blocker, list);
5420 g_free(blocker);
5425 void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5427 int i;
5428 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5429 bdrv_op_block(bs, i, reason);
5433 void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5435 int i;
5436 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5437 bdrv_op_unblock(bs, i, reason);
5441 bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5443 int i;
5445 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5446 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5447 return false;
5450 return true;
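/*
 * Editorial sketch (not part of the original source): typical use of the
 * operation blockers above. The choice of BLOCK_OP_TYPE_RESIZE and the error
 * text are invented for illustration; the same Error object serves as the key
 * when unblocking, and it remains owned by the caller.
 */
static void example_block_resize_temporarily(BlockDriverState *bs)
{
    Error *reason = NULL;
    Error *blocked_err = NULL;

    error_setg(&reason, "resize is temporarily disabled by the example job");
    bdrv_op_block(bs, BLOCK_OP_TYPE_RESIZE, reason);

    /* Anyone asking now gets "Device '...' is busy: ..." in blocked_err. */
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, &blocked_err)) {
        error_free(blocked_err);
    }

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_RESIZE, reason);
    error_free(reason);
}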
5453 void bdrv_iostatus_enable(BlockDriverState *bs)
5455 bs->iostatus_enabled = true;
5456 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5459 /* The I/O status is only enabled if the drive explicitly
5460 * enables it _and_ the VM is configured to stop on errors */
5461 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5463 return (bs->iostatus_enabled &&
5464 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5465 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5466 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5469 void bdrv_iostatus_disable(BlockDriverState *bs)
5471 bs->iostatus_enabled = false;
5474 void bdrv_iostatus_reset(BlockDriverState *bs)
5476 if (bdrv_iostatus_is_enabled(bs)) {
5477 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5478 if (bs->job) {
5479 block_job_iostatus_reset(bs->job);
5484 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5486 assert(bdrv_iostatus_is_enabled(bs));
5487 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5488 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5489 BLOCK_DEVICE_IO_STATUS_FAILED;
5493 void
5494 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
5495 enum BlockAcctType type)
5497 assert(type < BDRV_MAX_IOTYPE);
5499 cookie->bytes = bytes;
5500 cookie->start_time_ns = get_clock();
5501 cookie->type = type;
5504 void
5505 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
5507 assert(cookie->type < BDRV_MAX_IOTYPE);
5509 bs->nr_bytes[cookie->type] += cookie->bytes;
5510 bs->nr_ops[cookie->type]++;
5511 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
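/*
 * Editorial sketch (not part of the original source): how a device model might
 * wrap an I/O operation with the accounting helpers above. The byte count and
 * helper name are invented; BDRV_ACCT_READ is one of the BlockAcctType values
 * checked against BDRV_MAX_IOTYPE.
 */
static void example_account_read(BlockDriverState *bs, int64_t bytes)
{
    BlockAcctCookie cookie;

    bdrv_acct_start(bs, &cookie, bytes, BDRV_ACCT_READ);

    /* ... perform the actual read here ... */

    /* Adds bytes, one operation and the elapsed time to bs's statistics. */
    bdrv_acct_done(bs, &cookie);
}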
5514 void bdrv_img_create(const char *filename, const char *fmt,
5515 const char *base_filename, const char *base_fmt,
5516 char *options, uint64_t img_size, int flags,
5517 Error **errp, bool quiet)
5519 QemuOptsList *create_opts = NULL;
5520 QemuOpts *opts = NULL;
5521 const char *backing_fmt, *backing_file;
5522 int64_t size;
5523 BlockDriver *drv, *proto_drv;
5524 BlockDriver *backing_drv = NULL;
5525 Error *local_err = NULL;
5526 int ret = 0;
5528 /* Find driver and parse its options */
5529 drv = bdrv_find_format(fmt);
5530 if (!drv) {
5531 error_setg(errp, "Unknown file format '%s'", fmt);
5532 return;
5535 proto_drv = bdrv_find_protocol(filename, true);
5536 if (!proto_drv) {
5537 error_setg(errp, "Unknown protocol '%s'", filename);
5538 return;
5541 create_opts = qemu_opts_append(create_opts, drv->create_opts);
5542 create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
5544 /* Create parameter list with default values */
5545 opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
5546 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);
5548 /* Parse -o options */
5549 if (options) {
5550 if (qemu_opts_do_parse(opts, options, NULL) != 0) {
5551 error_setg(errp, "Invalid options for file format '%s'", fmt);
5552 goto out;
5556 if (base_filename) {
5557 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
5558 error_setg(errp, "Backing file not supported for file format '%s'",
5559 fmt);
5560 goto out;
5564 if (base_fmt) {
5565 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
5566 error_setg(errp, "Backing file format not supported for file "
5567 "format '%s'", fmt);
5568 goto out;
5572 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5573 if (backing_file) {
5574 if (!strcmp(filename, backing_file)) {
5575 error_setg(errp, "Error: Trying to create an image with the "
5576 "same filename as the backing file");
5577 goto out;
5581 backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5582 if (backing_fmt) {
5583 backing_drv = bdrv_find_format(backing_fmt);
5584 if (!backing_drv) {
5585 error_setg(errp, "Unknown backing file format '%s'",
5586 backing_fmt);
5587 goto out;
5591 // The size for the image must always be specified, with one exception:
5592 // If we are using a backing file, we can obtain the size from there
5593 size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5594 if (size == -1) {
5595 if (backing_file) {
5596 BlockDriverState *bs;
5597 uint64_t size;
5598 char buf[32];
5599 int back_flags;
5601 /* backing files always opened read-only */
5602 back_flags =
5603 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5605 bs = NULL;
5606 ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
5607 backing_drv, &local_err);
5608 if (ret < 0) {
5609 error_setg_errno(errp, -ret, "Could not open '%s': %s",
5610 backing_file,
5611 error_get_pretty(local_err));
5612 error_free(local_err);
5613 local_err = NULL;
5614 goto out;
5616 bdrv_get_geometry(bs, &size);
5617 size *= 512;
5619 snprintf(buf, sizeof(buf), "%" PRId64, size);
5620 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
5622 bdrv_unref(bs);
5623 } else {
5624 error_setg(errp, "Image creation needs a size parameter");
5625 goto out;
5629 if (!quiet) {
5630 printf("Formatting '%s', fmt=%s ", filename, fmt);
5631 qemu_opts_print(opts);
5632 puts("");
5635 ret = bdrv_create(drv, filename, opts, &local_err);
5637 if (ret == -EFBIG) {
5638 /* This is generally a better message than whatever the driver would
5639 * deliver (especially because of the cluster_size_hint), since that
5640 * is most probably not much different from "image too large". */
5641 const char *cluster_size_hint = "";
5642 if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
5643 cluster_size_hint = " (try using a larger cluster size)";
5645 error_setg(errp, "The image size is too large for file format '%s'"
5646 "%s", fmt, cluster_size_hint);
5647 error_free(local_err);
5648 local_err = NULL;
5651 out:
5652 qemu_opts_del(opts);
5653 qemu_opts_free(create_opts);
5654 if (local_err) {
5655 error_propagate(errp, local_err);
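/*
 * Editorial sketch (not part of the original source): a hypothetical caller of
 * bdrv_img_create(), roughly what "qemu-img create -f qcow2 test.qcow2 64M"
 * ends up doing. The file name, format and size are invented for illustration.
 */
static int example_create_image(Error **errp)
{
    Error *local_err = NULL;

    /* No backing file, no extra -o options, default flags, quiet output. */
    bdrv_img_create("test.qcow2", "qcow2", NULL, NULL, NULL,
                    64 * 1024 * 1024, 0, &local_err, true);
    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }
    return 0;
}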
5659 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5661 return bs->aio_context;
5664 void bdrv_detach_aio_context(BlockDriverState *bs)
5666 if (!bs->drv) {
5667 return;
5670 if (bs->io_limits_enabled) {
5671 throttle_detach_aio_context(&bs->throttle_state);
5673 if (bs->drv->bdrv_detach_aio_context) {
5674 bs->drv->bdrv_detach_aio_context(bs);
5676 if (bs->file) {
5677 bdrv_detach_aio_context(bs->file);
5679 if (bs->backing_hd) {
5680 bdrv_detach_aio_context(bs->backing_hd);
5683 bs->aio_context = NULL;
5686 void bdrv_attach_aio_context(BlockDriverState *bs,
5687 AioContext *new_context)
5689 if (!bs->drv) {
5690 return;
5693 bs->aio_context = new_context;
5695 if (bs->backing_hd) {
5696 bdrv_attach_aio_context(bs->backing_hd, new_context);
5698 if (bs->file) {
5699 bdrv_attach_aio_context(bs->file, new_context);
5701 if (bs->drv->bdrv_attach_aio_context) {
5702 bs->drv->bdrv_attach_aio_context(bs, new_context);
5704 if (bs->io_limits_enabled) {
5705 throttle_attach_aio_context(&bs->throttle_state, new_context);
5709 void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
5711 bdrv_drain_all(); /* ensure there are no in-flight requests */
5713 bdrv_detach_aio_context(bs);
5715 /* This function executes in the old AioContext so acquire the new one in
5716 * case it runs in a different thread.
5718 aio_context_acquire(new_context);
5719 bdrv_attach_aio_context(bs, new_context);
5720 aio_context_release(new_context);
5723 void bdrv_add_before_write_notifier(BlockDriverState *bs,
5724 NotifierWithReturn *notifier)
5726 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5729 int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts)
5731 if (!bs->drv->bdrv_amend_options) {
5732 return -ENOTSUP;
5734 return bs->drv->bdrv_amend_options(bs, opts);
5737 /* This function will be called by the bdrv_recurse_is_first_non_filter method
5738  * of block filters and by bdrv_is_first_non_filter.
5739  * It is used to test whether the given bs is the candidate, or to recurse
5740  * further into the node graph.
5742 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5743 BlockDriverState *candidate)
5745 /* return false if the basic checks fail */
5746 if (!bs || !bs->drv) {
5747 return false;
5750 /* the code reached a driver that is not a block filter -> check whether
5751  * this bs is the candidate. This is the recursion's termination condition.
5753 if (!bs->drv->is_filter) {
5754 return bs == candidate;
5756 /* Down this path the driver is a block filter driver */
5758 /* If the block filter recursion method is defined use it to recurse down
5759 * the node graph.
5761 if (bs->drv->bdrv_recurse_is_first_non_filter) {
5762 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5765 /* the driver is a block filter but does not allow recursion -> return false
5767 return false;
5770 /* This function checks if the candidate is the first non-filter bs down its
5771  * bs chain. Since we don't have pointers to parents it explores all bs chains
5772  * from the top. Some filters can choose not to pass down the recursion.
5774 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5776 BlockDriverState *bs;
5778 /* walk down the bs forest recursively */
5779 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5780 bool perm;
5782 /* try to recurse in this top level bs */
5783 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
5785 /* candidate is the first non filter */
5786 if (perm) {
5787 return true;
5791 return false;