/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"
#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}

/* This function makes an I/O wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
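/*
 * Illustrative sketch (not part of the original file): the request paths in
 * this file call the intercept before submitting I/O, roughly as
 *
 *     if (bs->io_limits_enabled) {
 *         bdrv_io_limits_intercept(bs, bytes, is_write);
 *     }
 *
 * so a request either proceeds immediately or parks itself on
 * throttled_reqs[] until a throttle timer callback or a completing request
 * wakes it via qemu_co_enter_next()/qemu_co_queue_next().
 */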
size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
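/*
 * Example (added for illustration): combining base_path "/images/base.qcow2"
 * with the relative name "snap.qcow2" yields "/images/snap.qcow2", while an
 * absolute filename such as "/tmp/other.raw" is copied to dest unchanged.
 */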
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;
    int i;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
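/*
 * The synchronous-wrapper pattern used by bdrv_create() above recurs
 * throughout this file: start a coroutine, then poll the main AioContext
 * until the coroutine reports completion through a shared context struct
 * (illustrative sketch only):
 *
 *     co = qemu_coroutine_create(entry_fn);
 *     qemu_coroutine_enter(co, &ctx);
 *     while (ctx.ret == NOT_DONE) {
 *         aio_poll(qemu_get_aio_context(), true);
 *     }
 */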
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
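/*
 * Example (added for illustration): with TMPDIR unset, a successful call
 * fills 'filename' with a mkstemp-generated name such as "/var/tmp/vl.Ab12Cd"
 * and leaves an empty file of that name in place for the caller to use.
 */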
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
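/*
 * Example (added for illustration): "nbd://localhost:10809/disk" matches the
 * driver whose protocol_name is "nbd", while a plain path such as
 * "/var/lib/images/disk.img" carries no protocol prefix and falls back to
 * the "file" driver.
 */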
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/*
 * Set the current 'total_sectors' value
 * Return 0 on success, -errno on error.
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/*
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
/*
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
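/*
 * Summary of the mode-to-flag mapping implemented above:
 *
 *   none / off    BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *   directsync    BDRV_O_NOCACHE
 *   writeback     BDRV_O_CACHE_WB
 *   unsafe        BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 *   writethrough  (no flags; the default)
 */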
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() was called directly with a protocol driver as drv. That
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);
    bs->growable = !!(flags & BDRV_O_PROTOCOL);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
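/*
 * Example (added for illustration): the pseudo-protocol filename
 *
 *     json:{"driver": "qcow2", "file": {"driver": "file",
 *           "filename": "disk.qcow2"}}
 *
 * is parsed and flattened into the option entries driver=qcow2,
 * file.driver=file and file.filename=disk.qcow2.
 */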
/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                       "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{

    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bs->device_name);
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling this function.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriver *back_drv = NULL;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new("", errp);

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
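/*
 * Example (added for illustration): with bdref_key "file", the parent
 * options file.driver=file and file.filename=disk.img are extracted into the
 * BlockdevRef QDict as driver=file and filename=disk.img before the image is
 * opened; alternatively, file=<node-name> references an already-open device.
 */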
int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    BlockDriver *bdrv_qcow2;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    bdrv_qcow2 = bdrv_find_format("qcow2");
    opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new("", &error_abort);

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
    return ret;
}
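/*
 * After bdrv_append() above, the chain seen by the guest is (sketch):
 *
 *     guest -> temporary qcow2 overlay -> original image
 *
 * i.e. the original image becomes the backing file of the freshly created
 * overlay, and all new writes go to the temporary file.
 */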
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new("", &error_abort);
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bs->device_name, entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
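/*
 * Illustrative sketch (not part of the original file): a transactional
 * reopen of several devices builds up one queue and commits it in one call:
 *
 *     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs0, flags0);
 *     queue = bdrv_reopen_queue(queue, bs1, flags1);
 *     ret = bdrv_reopen_multiple(queue, &local_err);
 *
 * bdrv_reopen() further below is the single-device convenience wrapper
 * around exactly this pattern.
 */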
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs, NULL);
}
/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            BlockDriverState *backing_hd = bs->backing_hd;
            bdrv_set_backing_hd(bs, NULL);
            bdrv_unref(backing_hd);
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_close(bs);
        aio_context_release(aio_context);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        busy = false;

        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            AioContext *aio_context = bdrv_get_aio_context(bs);
            bool bs_busy;

            aio_context_acquire(aio_context);
            bdrv_flush_io_queue(bs);
            bdrv_start_throttled_reqs(bs);
            bs_busy = bdrv_requests_pending(bs);
            bs_busy |= aio_poll(aio_context, bs_busy);
            aio_context_release(aio_context);

            busy |= bs_busy;
        }
    }
}
/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists.
 * Also, NUL-terminate the device_name to prevent a double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;
    memcpy(bs_dest->op_blockers, bs_src->op_blockers,
           sizeof(bs_dest->op_blockers));
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* The code needs to swap the node_name but simply swapping node_list won't
     * work so first remove the nodes from the graph list, do the swap then
     * insert them back if needed.
     */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
    }

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* insert the nodes back into the graph node list if needed */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
    }

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bdrv_set_backing_hd(bs_top, bs_new);
}
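/*
 * Illustrative note (added): bdrv_append_temp_snapshot() above uses this to
 * install a freshly created overlay on top of a live chain; after the call,
 * bs_top is the new active layer (holding the contents that came in through
 * bs_new) and bs_new holds the old contents as its backing file.
 */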
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(bdrv_op_blocker_is_empty(bs));
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->guest_block_size = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}
2137 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2138 void *opaque)
2140 bs->dev_ops = ops;
2141 bs->dev_opaque = opaque;
2144 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2146 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2147 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2148 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2149 if (tray_was_closed) {
2150 /* tray open */
2151 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2152 true, &error_abort);
2154 if (load) {
2155 /* tray close */
2156 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2157 false, &error_abort);
2162 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2164 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2167 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2169 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2170 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2174 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2176 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2177 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2179 return false;
2182 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2184 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2185 bs->dev_ops->resize_cb(bs->dev_opaque);
2189 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2191 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2192 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2194 return false;
2198 * Run consistency checks on an image
2200 * Returns 0 if the check could be completed (it doesn't mean that the image is
2201 * free of errors) or -errno when an internal error occurred. The results of the
2202 * check are stored in res.
2204 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2206 if (bs->drv->bdrv_check == NULL) {
2207 return -ENOTSUP;
2210 memset(res, 0, sizeof(*res));
2211 return bs->drv->bdrv_check(bs, res, fix);
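/*
 * Illustrative usage sketch, not part of the original file: a typical
 * bdrv_check() caller in the spirit of qemu-img.  A return value of 0
 * only means the check ran to completion; the verdict is in the
 * BdrvCheckResult fields (names as used elsewhere in the tree).
 */
static int example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult result;
    int ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS);

    if (ret < 0) {
        return ret;                 /* internal error or -ENOTSUP */
    }
    if (result.check_errors || result.corruptions) {
        return -EIO;                /* check completed, image is damaged */
    }
    return 0;
}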
2214 #define COMMIT_BUF_SECTORS 2048
2216 /* commit the COW image contents into its backing file */
2217 int bdrv_commit(BlockDriverState *bs)
2219 BlockDriver *drv = bs->drv;
2220 int64_t sector, total_sectors, length, backing_length;
2221 int n, ro, open_flags;
2222 int ret = 0;
2223 uint8_t *buf = NULL;
2224 char filename[PATH_MAX];
2226 if (!drv)
2227 return -ENOMEDIUM;
2229 if (!bs->backing_hd) {
2230 return -ENOTSUP;
2233 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2234 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2235 return -EBUSY;
2238 ro = bs->backing_hd->read_only;
2239 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2240 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2241 open_flags = bs->backing_hd->open_flags;
2243 if (ro) {
2244 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2245 return -EACCES;
2249 length = bdrv_getlength(bs);
2250 if (length < 0) {
2251 ret = length;
2252 goto ro_cleanup;
2255 backing_length = bdrv_getlength(bs->backing_hd);
2256 if (backing_length < 0) {
2257 ret = backing_length;
2258 goto ro_cleanup;
2261 /* If our top snapshot is larger than the backing file image,
2262 * grow the backing file image if possible. If not possible,
2263 * we must return an error */
2264 if (length > backing_length) {
2265 ret = bdrv_truncate(bs->backing_hd, length);
2266 if (ret < 0) {
2267 goto ro_cleanup;
2271 total_sectors = length >> BDRV_SECTOR_BITS;
2272 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2274 for (sector = 0; sector < total_sectors; sector += n) {
2275 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2276 if (ret < 0) {
2277 goto ro_cleanup;
2279 if (ret) {
2280 ret = bdrv_read(bs, sector, buf, n);
2281 if (ret < 0) {
2282 goto ro_cleanup;
2285 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2286 if (ret < 0) {
2287 goto ro_cleanup;
2292 if (drv->bdrv_make_empty) {
2293 ret = drv->bdrv_make_empty(bs);
2294 if (ret < 0) {
2295 goto ro_cleanup;
2297 bdrv_flush(bs);
2301 * Make sure all data we wrote to the backing device is actually
2302 * stable on disk.
2304 if (bs->backing_hd) {
2305 bdrv_flush(bs->backing_hd);
2308 ret = 0;
2309 ro_cleanup:
2310 g_free(buf);
2312 if (ro) {
2313 /* ignoring error return here */
2314 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2317 return ret;
2320 int bdrv_commit_all(void)
2322 BlockDriverState *bs;
2324 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2325 AioContext *aio_context = bdrv_get_aio_context(bs);
2327 aio_context_acquire(aio_context);
2328 if (bs->drv && bs->backing_hd) {
2329 int ret = bdrv_commit(bs);
2330 if (ret < 0) {
2331 aio_context_release(aio_context);
2332 return ret;
2335 aio_context_release(aio_context);
2337 return 0;
2341 * Remove an active request from the tracked requests list
2343 * This function should be called when a tracked request is completing.
2345 static void tracked_request_end(BdrvTrackedRequest *req)
2347 if (req->serialising) {
2348 req->bs->serialising_in_flight--;
2351 QLIST_REMOVE(req, list);
2352 qemu_co_queue_restart_all(&req->wait_queue);
2356 * Add an active request to the tracked requests list
2358 static void tracked_request_begin(BdrvTrackedRequest *req,
2359 BlockDriverState *bs,
2360 int64_t offset,
2361 unsigned int bytes, bool is_write)
2363 *req = (BdrvTrackedRequest){
2364 .bs = bs,
2365 .offset = offset,
2366 .bytes = bytes,
2367 .is_write = is_write,
2368 .co = qemu_coroutine_self(),
2369 .serialising = false,
2370 .overlap_offset = offset,
2371 .overlap_bytes = bytes,
2374 qemu_co_queue_init(&req->wait_queue);
2376 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2379 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2381 int64_t overlap_offset = req->offset & ~(align - 1);
2382 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2383 - overlap_offset;
2385 if (!req->serialising) {
2386 req->bs->serialising_in_flight++;
2387 req->serialising = true;
2390 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2391 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2395 * Round a region to cluster boundaries
2397 void bdrv_round_to_clusters(BlockDriverState *bs,
2398 int64_t sector_num, int nb_sectors,
2399 int64_t *cluster_sector_num,
2400 int *cluster_nb_sectors)
2402 BlockDriverInfo bdi;
2404 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2405 *cluster_sector_num = sector_num;
2406 *cluster_nb_sectors = nb_sectors;
2407 } else {
2408 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2409 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2410 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2411 nb_sectors, c);
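/*
 * Worked example (illustrative, not part of the original file): with a
 * 64 KiB cluster size, c = 65536 / 512 = 128 sectors.  Rounding the range
 * [130, 134) gives QEMU_ALIGN_DOWN(130, 128) == 128 for the start and
 * QEMU_ALIGN_UP(130 - 128 + 4, 128) == 128 for the count, i.e. the whole
 * cluster [128, 256) is covered.
 */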
2415 static int bdrv_get_cluster_size(BlockDriverState *bs)
2417 BlockDriverInfo bdi;
2418 int ret;
2420 ret = bdrv_get_info(bs, &bdi);
2421 if (ret < 0 || bdi.cluster_size == 0) {
2422 return bs->request_alignment;
2423 } else {
2424 return bdi.cluster_size;
2428 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2429 int64_t offset, unsigned int bytes)
2431 /* aaaa bbbb */
2432 if (offset >= req->overlap_offset + req->overlap_bytes) {
2433 return false;
2435 /* bbbb aaaa */
2436 if (req->overlap_offset >= offset + bytes) {
2437 return false;
2439 return true;
2442 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2444 BlockDriverState *bs = self->bs;
2445 BdrvTrackedRequest *req;
2446 bool retry;
2447 bool waited = false;
2449 if (!bs->serialising_in_flight) {
2450 return false;
2453 do {
2454 retry = false;
2455 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2456 if (req == self || (!req->serialising && !self->serialising)) {
2457 continue;
2459 if (tracked_request_overlaps(req, self->overlap_offset,
2460 self->overlap_bytes))
2462 /* Hitting this means there was a reentrant request, for
2463 * example, a block driver issuing nested requests. This must
2464 * never happen since it means deadlock.
2466 assert(qemu_coroutine_self() != req->co);
2468 /* If the request is already (indirectly) waiting for us, or
2469 * will wait for us as soon as it wakes up, then just go on
2470 * (instead of producing a deadlock in the former case). */
2471 if (!req->waiting_for) {
2472 self->waiting_for = req;
2473 qemu_co_queue_wait(&req->wait_queue);
2474 self->waiting_for = NULL;
2475 retry = true;
2476 waited = true;
2477 break;
2481 } while (retry);
2483 return waited;
2487 * Return values:
2488 * 0 - success
2489 * -EINVAL - backing format specified, but no file
2490 * -ENOSPC - can't update the backing file because no space is left in the
2491 * image file header
2492 * -ENOTSUP - format driver doesn't support changing the backing file
2494 int bdrv_change_backing_file(BlockDriverState *bs,
2495 const char *backing_file, const char *backing_fmt)
2497 BlockDriver *drv = bs->drv;
2498 int ret;
2500 /* Backing file format doesn't make sense without a backing file */
2501 if (backing_fmt && !backing_file) {
2502 return -EINVAL;
2505 if (drv->bdrv_change_backing_file != NULL) {
2506 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2507 } else {
2508 ret = -ENOTSUP;
2511 if (ret == 0) {
2512 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2513 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2515 return ret;
2519 * Finds the image layer in the chain that has 'bs' as its backing file.
2521 * active is the current topmost image.
2523 * Returns NULL if bs is not found in active's image chain,
2524 * or if active == bs.
2526 * Returns the bottommost base image if bs == NULL.
2528 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2529 BlockDriverState *bs)
2531 while (active && bs != active->backing_hd) {
2532 active = active->backing_hd;
2535 return active;
2538 /* Given a BDS, searches for the base layer. */
2539 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
2541 return bdrv_find_overlay(bs, NULL);
2544 typedef struct BlkIntermediateStates {
2545 BlockDriverState *bs;
2546 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2547 } BlkIntermediateStates;
2551 * Drops images above 'base' up to and including 'top', and sets the image
2552 * above 'top' to have base as its backing file.
2554 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2555 * information in 'bs' can be properly updated.
2557 * E.g., this will convert the following chain:
2558 * bottom <- base <- intermediate <- top <- active
2560 * to
2562 * bottom <- base <- active
2564 * It is allowed for bottom==base, in which case it converts:
2566 * base <- intermediate <- top <- active
2568 * to
2570 * base <- active
2572 * If backing_file_str is non-NULL, it will be used when modifying top's
2573 * overlay image metadata.
2575 * Error conditions:
2576 * if active == top, that is considered an error
2579 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2580 BlockDriverState *base, const char *backing_file_str)
2582 BlockDriverState *intermediate;
2583 BlockDriverState *base_bs = NULL;
2584 BlockDriverState *new_top_bs = NULL;
2585 BlkIntermediateStates *intermediate_state, *next;
2586 int ret = -EIO;
2588 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2589 QSIMPLEQ_INIT(&states_to_delete);
2591 if (!top->drv || !base->drv) {
2592 goto exit;
2595 new_top_bs = bdrv_find_overlay(active, top);
2597 if (new_top_bs == NULL) {
2598 /* we could not find the image above 'top', this is an error */
2599 goto exit;
2602 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2603 * to do, no intermediate images */
2604 if (new_top_bs->backing_hd == base) {
2605 ret = 0;
2606 goto exit;
2609 intermediate = top;
2611 /* now we will go down through the list, and add each BDS we find
2612 * into our deletion queue, until we hit the 'base'
2614 while (intermediate) {
2615 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2616 intermediate_state->bs = intermediate;
2617 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2619 if (intermediate->backing_hd == base) {
2620 base_bs = intermediate->backing_hd;
2621 break;
2623 intermediate = intermediate->backing_hd;
2625 if (base_bs == NULL) {
2626 /* something went wrong, we did not end at the base; safely
2627 * unravel everything and exit with an error */
2628 goto exit;
2631 /* success - we can delete the intermediate states, and link top->base */
2632 backing_file_str = backing_file_str ? backing_file_str : base_bs->filename;
2633 ret = bdrv_change_backing_file(new_top_bs, backing_file_str,
2634 base_bs->drv ? base_bs->drv->format_name : "");
2635 if (ret) {
2636 goto exit;
2638 bdrv_set_backing_hd(new_top_bs, base_bs);
2640 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2641 /* so that bdrv_close() does not recursively close the chain */
2642 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2643 bdrv_unref(intermediate_state->bs);
2645 ret = 0;
2647 exit:
2648 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2649 g_free(intermediate_state);
2651 return ret;
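/*
 * Illustrative usage sketch, not part of the original file: collapsing a
 * chain down to "base <- active" after a block-commit-style operation.
 * The helper name is hypothetical.
 */
static int example_flatten_chain(BlockDriverState *active)
{
    BlockDriverState *base = bdrv_find_base(active);
    BlockDriverState *top = active->backing_hd;

    if (!top || top == base) {
        return 0;       /* no intermediate images to drop */
    }
    /* drop everything between base and active; passing NULL keeps the
     * default backing file string (base_bs->filename) */
    return bdrv_drop_intermediate(active, top, base, NULL);
}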
2655 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2656 size_t size)
2658 int64_t len;
2660 if (size > INT_MAX) {
2661 return -EIO;
2664 if (!bdrv_is_inserted(bs))
2665 return -ENOMEDIUM;
2667 if (bs->growable)
2668 return 0;
2670 len = bdrv_getlength(bs);
2672 if (offset < 0)
2673 return -EIO;
2675 if ((offset > len) || (len - offset < size))
2676 return -EIO;
2678 return 0;
2681 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2682 int nb_sectors)
2684 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2685 return -EIO;
2688 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2689 nb_sectors * BDRV_SECTOR_SIZE);
2692 typedef struct RwCo {
2693 BlockDriverState *bs;
2694 int64_t offset;
2695 QEMUIOVector *qiov;
2696 bool is_write;
2697 int ret;
2698 BdrvRequestFlags flags;
2699 } RwCo;
2701 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2703 RwCo *rwco = opaque;
2705 if (!rwco->is_write) {
2706 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2707 rwco->qiov->size, rwco->qiov,
2708 rwco->flags);
2709 } else {
2710 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2711 rwco->qiov->size, rwco->qiov,
2712 rwco->flags);
2717 * Process a vectored synchronous request using coroutines
2719 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2720 QEMUIOVector *qiov, bool is_write,
2721 BdrvRequestFlags flags)
2723 Coroutine *co;
2724 RwCo rwco = {
2725 .bs = bs,
2726 .offset = offset,
2727 .qiov = qiov,
2728 .is_write = is_write,
2729 .ret = NOT_DONE,
2730 .flags = flags,
2734 * In a synchronous call context the vcpu is blocked, so this throttling
2735 * timer cannot fire; therefore the I/O throttling function has to be
2736 * disabled here if it has been enabled.
2738 if (bs->io_limits_enabled) {
2739 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2740 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2741 bdrv_io_limits_disable(bs);
2744 if (qemu_in_coroutine()) {
2745 /* Fast-path if already in coroutine context */
2746 bdrv_rw_co_entry(&rwco);
2747 } else {
2748 AioContext *aio_context = bdrv_get_aio_context(bs);
2750 co = qemu_coroutine_create(bdrv_rw_co_entry);
2751 qemu_coroutine_enter(co, &rwco);
2752 while (rwco.ret == NOT_DONE) {
2753 aio_poll(aio_context, true);
2756 return rwco.ret;
2760 * Process a synchronous request using coroutines
2762 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2763 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2765 QEMUIOVector qiov;
2766 struct iovec iov = {
2767 .iov_base = (void *)buf,
2768 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2771 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2772 return -EINVAL;
2775 qemu_iovec_init_external(&qiov, &iov, 1);
2776 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2777 &qiov, is_write, flags);
2780 /* return < 0 if error. See bdrv_write() for the return codes */
2781 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2782 uint8_t *buf, int nb_sectors)
2784 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2787 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2788 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2789 uint8_t *buf, int nb_sectors)
2791 bool enabled;
2792 int ret;
2794 enabled = bs->io_limits_enabled;
2795 bs->io_limits_enabled = false;
2796 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2797 bs->io_limits_enabled = enabled;
2798 return ret;
2801 /* Return < 0 if error. Important errors are:
2802 -EIO generic I/O error (may happen for all errors)
2803 -ENOMEDIUM no media inserted
2804 -EINVAL invalid sector number or nb_sectors
2805 -EACCES trying to write a read-only device
2807 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2808 const uint8_t *buf, int nb_sectors)
2810 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2813 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2814 int nb_sectors, BdrvRequestFlags flags)
2816 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2817 BDRV_REQ_ZERO_WRITE | flags);
2821 * Completely zero out a block device with the help of bdrv_write_zeroes.
2822 * The operation is sped up by checking the block status and only writing
2823 * zeroes to ranges that do not already read back as zeroes. Optional
2824 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2826 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2828 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2830 int64_t target_sectors, ret, nb_sectors, sector_num = 0;
2831 int n;
2833 target_sectors = bdrv_nb_sectors(bs);
2834 if (target_sectors < 0) {
2835 return target_sectors;
2838 for (;;) {
2839 nb_sectors = target_sectors - sector_num;
2840 if (nb_sectors <= 0) {
2841 return 0;
2843 if (nb_sectors > INT_MAX) {
2844 nb_sectors = INT_MAX;
2846 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2847 if (ret < 0) {
2848 error_report("error getting block status at sector %" PRId64 ": %s",
2849 sector_num, strerror(-ret));
2850 return ret;
2852 if (ret & BDRV_BLOCK_ZERO) {
2853 sector_num += n;
2854 continue;
2856 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2857 if (ret < 0) {
2858 error_report("error writing zeroes at sector %" PRId64 ": %s",
2859 sector_num, strerror(-ret));
2860 return ret;
2862 sector_num += n;
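/*
 * Illustrative usage sketch, not part of the original file: clearing a
 * mirror target before copying into it, letting the driver unmap ranges
 * instead of writing literal zeroes where the format allows it.
 */
static int example_clear_target(BlockDriverState *target)
{
    return bdrv_make_zero(target, BDRV_REQ_MAY_UNMAP);
}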
2866 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2868 QEMUIOVector qiov;
2869 struct iovec iov = {
2870 .iov_base = (void *)buf,
2871 .iov_len = bytes,
2873 int ret;
2875 if (bytes < 0) {
2876 return -EINVAL;
2879 qemu_iovec_init_external(&qiov, &iov, 1);
2880 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2881 if (ret < 0) {
2882 return ret;
2885 return bytes;
2888 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2890 int ret;
2892 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2893 if (ret < 0) {
2894 return ret;
2897 return qiov->size;
2900 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2901 const void *buf, int bytes)
2903 QEMUIOVector qiov;
2904 struct iovec iov = {
2905 .iov_base = (void *) buf,
2906 .iov_len = bytes,
2909 if (bytes < 0) {
2910 return -EINVAL;
2913 qemu_iovec_init_external(&qiov, &iov, 1);
2914 return bdrv_pwritev(bs, offset, &qiov);
2918 * Writes to the file and ensures that no writes are reordered across this
2919 * request (acts as a barrier)
2921 * Returns 0 on success, -errno in error cases.
2923 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2924 const void *buf, int count)
2926 int ret;
2928 ret = bdrv_pwrite(bs, offset, buf, count);
2929 if (ret < 0) {
2930 return ret;
2933 /* No flush needed for cache modes that already do it */
2934 if (bs->enable_write_cache) {
2935 bdrv_flush(bs);
2938 return 0;
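/*
 * Illustrative usage sketch, not part of the original file: format drivers
 * use bdrv_pwrite_sync() for metadata updates that must not be reordered
 * with later writes.  The on-disk layout below is made up for the example.
 */
static int example_bump_header_generation(BlockDriverState *bs, uint64_t gen)
{
    uint64_t val = cpu_to_be64(gen);

    /* hypothetical image header: big-endian generation count at byte 8 */
    return bdrv_pwrite_sync(bs, 8, &val, sizeof(val));
}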
2941 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2942 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2944 /* Perform I/O through a temporary buffer so that users who scribble over
2945 * their read buffer while the operation is in progress do not end up
2946 * modifying the image file. This is critical for zero-copy guest I/O
2947 * where anything might happen inside guest memory.
2949 void *bounce_buffer;
2951 BlockDriver *drv = bs->drv;
2952 struct iovec iov;
2953 QEMUIOVector bounce_qiov;
2954 int64_t cluster_sector_num;
2955 int cluster_nb_sectors;
2956 size_t skip_bytes;
2957 int ret;
2959 /* Cover the entire cluster so no additional backing file I/O is required
2960 * when allocating a cluster in the image file.
2962 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2963 &cluster_sector_num, &cluster_nb_sectors);
2965 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2966 cluster_sector_num, cluster_nb_sectors);
2968 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2969 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
2970 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2972 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2973 &bounce_qiov);
2974 if (ret < 0) {
2975 goto err;
2978 if (drv->bdrv_co_write_zeroes &&
2979 buffer_is_zero(bounce_buffer, iov.iov_len)) {
2980 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
2981 cluster_nb_sectors, 0);
2982 } else {
2983 /* This does not change the data on the disk, so it is not necessary
2984 * to flush even in cache=writethrough mode.
2986 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
2987 &bounce_qiov);
2990 if (ret < 0) {
2991 /* It might be okay to ignore write errors for guest requests. If this
2992 * is a deliberate copy-on-read then we don't want to ignore the error.
2993 * Simply report it in all cases.
2995 goto err;
2998 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
2999 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3000 nb_sectors * BDRV_SECTOR_SIZE);
3002 err:
3003 qemu_vfree(bounce_buffer);
3004 return ret;
3008 * Forwards an already correctly aligned request to the BlockDriver. This
3009 * handles copy on read and zeroing after EOF; any other features must be
3010 * implemented by the caller.
3012 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3013 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3014 int64_t align, QEMUIOVector *qiov, int flags)
3016 BlockDriver *drv = bs->drv;
3017 int ret;
3019 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3020 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3022 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3023 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3024 assert(!qiov || bytes == qiov->size);
3026 /* Handle Copy on Read and associated serialisation */
3027 if (flags & BDRV_REQ_COPY_ON_READ) {
3028 /* If we touch the same cluster it counts as an overlap. This
3029 * guarantees that allocating writes will be serialized and not race
3030 * with each other for the same cluster. For example, in copy-on-read
3031 * it ensures that the CoR read and write operations are atomic and
3032 * guest writes cannot interleave between them. */
3033 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3036 wait_serialising_requests(req);
3038 if (flags & BDRV_REQ_COPY_ON_READ) {
3039 int pnum;
3041 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3042 if (ret < 0) {
3043 goto out;
3046 if (!ret || pnum != nb_sectors) {
3047 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3048 goto out;
3052 /* Forward the request to the BlockDriver */
3053 if (!(bs->zero_beyond_eof && bs->growable)) {
3054 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3055 } else {
3056 /* Read zeroes after EOF of growable BDSes */
3057 int64_t total_sectors, max_nb_sectors;
3059 total_sectors = bdrv_nb_sectors(bs);
3060 if (total_sectors < 0) {
3061 ret = total_sectors;
3062 goto out;
3065 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3066 align >> BDRV_SECTOR_BITS);
3067 if (max_nb_sectors > 0) {
3068 QEMUIOVector local_qiov;
3069 size_t local_sectors;
3071 max_nb_sectors = MIN(max_nb_sectors, SIZE_MAX / BDRV_SECTOR_SIZE);
3072 local_sectors = MIN(max_nb_sectors, nb_sectors);
3074 qemu_iovec_init(&local_qiov, qiov->niov);
3075 qemu_iovec_concat(&local_qiov, qiov, 0,
3076 local_sectors * BDRV_SECTOR_SIZE);
3078 ret = drv->bdrv_co_readv(bs, sector_num, local_sectors,
3079 &local_qiov);
3081 qemu_iovec_destroy(&local_qiov);
3082 } else {
3083 ret = 0;
3086 /* Reading beyond end of file is supposed to produce zeroes */
3087 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3088 uint64_t offset = MAX(0, total_sectors - sector_num);
3089 uint64_t bytes = (sector_num + nb_sectors - offset) *
3090 BDRV_SECTOR_SIZE;
3091 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3095 out:
3096 return ret;
3100 * Handle a read request in coroutine context
3102 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3103 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3104 BdrvRequestFlags flags)
3106 BlockDriver *drv = bs->drv;
3107 BdrvTrackedRequest req;
3109 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3110 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3111 uint8_t *head_buf = NULL;
3112 uint8_t *tail_buf = NULL;
3113 QEMUIOVector local_qiov;
3114 bool use_local_qiov = false;
3115 int ret;
3117 if (!drv) {
3118 return -ENOMEDIUM;
3120 if (bdrv_check_byte_request(bs, offset, bytes)) {
3121 return -EIO;
3124 if (bs->copy_on_read) {
3125 flags |= BDRV_REQ_COPY_ON_READ;
3128 /* throttling disk I/O */
3129 if (bs->io_limits_enabled) {
3130 bdrv_io_limits_intercept(bs, bytes, false);
3133 /* Align read if necessary by padding qiov */
3134 if (offset & (align - 1)) {
3135 head_buf = qemu_blockalign(bs, align);
3136 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3137 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3138 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3139 use_local_qiov = true;
3141 bytes += offset & (align - 1);
3142 offset = offset & ~(align - 1);
3145 if ((offset + bytes) & (align - 1)) {
3146 if (!use_local_qiov) {
3147 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3148 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3149 use_local_qiov = true;
3151 tail_buf = qemu_blockalign(bs, align);
3152 qemu_iovec_add(&local_qiov, tail_buf,
3153 align - ((offset + bytes) & (align - 1)));
3155 bytes = ROUND_UP(bytes, align);
3158 tracked_request_begin(&req, bs, offset, bytes, false);
3159 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3160 use_local_qiov ? &local_qiov : qiov,
3161 flags);
3162 tracked_request_end(&req);
3164 if (use_local_qiov) {
3165 qemu_iovec_destroy(&local_qiov);
3166 qemu_vfree(head_buf);
3167 qemu_vfree(tail_buf);
3170 return ret;
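/*
 * Worked example (illustrative, not part of the original file): with
 * request_alignment == 4096, a read of bytes [5000, 6000) is widened to
 * [4096, 8192).  A 904-byte head buffer covers [4096, 5000), the guest's
 * qiov covers [5000, 6000), and a 2192-byte tail buffer covers
 * [6000, 8192); both padding buffers are freed once the aligned read
 * completes.
 */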
3173 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3174 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3175 BdrvRequestFlags flags)
3177 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3178 return -EINVAL;
3181 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3182 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3185 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3186 int nb_sectors, QEMUIOVector *qiov)
3188 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3190 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3193 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3194 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3196 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3198 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3199 BDRV_REQ_COPY_ON_READ);
3202 /* If no limit is specified in the BlockLimits, use a default
3203 * of 32768 512-byte sectors (16 MiB) per request.
3205 #define MAX_WRITE_ZEROES_DEFAULT 32768
3207 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3208 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3210 BlockDriver *drv = bs->drv;
3211 QEMUIOVector qiov;
3212 struct iovec iov = {0};
3213 int ret = 0;
3215 int max_write_zeroes = bs->bl.max_write_zeroes ?
3216 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3218 while (nb_sectors > 0 && !ret) {
3219 int num = nb_sectors;
3221 /* Align request. Block drivers can expect the "bulk" of the request
3222 * to be aligned.
3224 if (bs->bl.write_zeroes_alignment
3225 && num > bs->bl.write_zeroes_alignment) {
3226 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3227 /* Make a small request up to the first aligned sector. */
3228 num = bs->bl.write_zeroes_alignment;
3229 num -= sector_num % bs->bl.write_zeroes_alignment;
3230 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3231 /* Shorten the request to the last aligned sector. num cannot
3232 * underflow because num > bs->bl.write_zeroes_alignment.
3234 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3238 /* limit request size */
3239 if (num > max_write_zeroes) {
3240 num = max_write_zeroes;
3243 ret = -ENOTSUP;
3244 /* First try the efficient write zeroes operation */
3245 if (drv->bdrv_co_write_zeroes) {
3246 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3249 if (ret == -ENOTSUP) {
3250 /* Fall back to bounce buffer if write zeroes is unsupported */
3251 iov.iov_len = num * BDRV_SECTOR_SIZE;
3252 if (iov.iov_base == NULL) {
3253 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3254 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3256 qemu_iovec_init_external(&qiov, &iov, 1);
3258 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3260 /* Keep the bounce buffer around if it is big enough for
3261 * all future requests.
3263 if (num < max_write_zeroes) {
3264 qemu_vfree(iov.iov_base);
3265 iov.iov_base = NULL;
3269 sector_num += num;
3270 nb_sectors -= num;
3273 qemu_vfree(iov.iov_base);
3274 return ret;
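/*
 * Worked example (illustrative, not part of the original file): with
 * bl.write_zeroes_alignment == 8, zeroing sectors [5, 30) is issued as
 * three driver calls: [5, 8) to reach the first aligned sector, [8, 24)
 * as the aligned bulk, and [24, 30) as the short unaligned tail.
 */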
3278 * Forwards an already correctly aligned write request to the BlockDriver.
3280 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3281 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3282 QEMUIOVector *qiov, int flags)
3284 BlockDriver *drv = bs->drv;
3285 bool waited;
3286 int ret;
3288 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3289 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3291 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3292 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3293 assert(!qiov || bytes == qiov->size);
3295 waited = wait_serialising_requests(req);
3296 assert(!waited || !req->serialising);
3297 assert(req->overlap_offset <= offset);
3298 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3300 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3302 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3303 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3304 qemu_iovec_is_zero(qiov)) {
3305 flags |= BDRV_REQ_ZERO_WRITE;
3306 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3307 flags |= BDRV_REQ_MAY_UNMAP;
3311 if (ret < 0) {
3312 /* Do nothing, write notifier decided to fail this request */
3313 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3314 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3315 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3316 } else {
3317 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3318 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3320 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3322 if (ret == 0 && !bs->enable_write_cache) {
3323 ret = bdrv_co_flush(bs);
3326 bdrv_set_dirty(bs, sector_num, nb_sectors);
3328 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3329 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3331 if (bs->growable && ret >= 0) {
3332 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3335 return ret;
3339 * Handle a write request in coroutine context
3341 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3342 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3343 BdrvRequestFlags flags)
3345 BdrvTrackedRequest req;
3346 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3347 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3348 uint8_t *head_buf = NULL;
3349 uint8_t *tail_buf = NULL;
3350 QEMUIOVector local_qiov;
3351 bool use_local_qiov = false;
3352 int ret;
3354 if (!bs->drv) {
3355 return -ENOMEDIUM;
3357 if (bs->read_only) {
3358 return -EACCES;
3360 if (bdrv_check_byte_request(bs, offset, bytes)) {
3361 return -EIO;
3364 /* throttling disk I/O */
3365 if (bs->io_limits_enabled) {
3366 bdrv_io_limits_intercept(bs, bytes, true);
3370 * Align write if necessary by performing a read-modify-write cycle.
3371 * Pad qiov with the read parts and be sure to have a tracked request not
3372 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3374 tracked_request_begin(&req, bs, offset, bytes, true);
3376 if (offset & (align - 1)) {
3377 QEMUIOVector head_qiov;
3378 struct iovec head_iov;
3380 mark_request_serialising(&req, align);
3381 wait_serialising_requests(&req);
3383 head_buf = qemu_blockalign(bs, align);
3384 head_iov = (struct iovec) {
3385 .iov_base = head_buf,
3386 .iov_len = align,
3388 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3390 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3391 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3392 align, &head_qiov, 0);
3393 if (ret < 0) {
3394 goto fail;
3396 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3398 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3399 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3400 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3401 use_local_qiov = true;
3403 bytes += offset & (align - 1);
3404 offset = offset & ~(align - 1);
3407 if ((offset + bytes) & (align - 1)) {
3408 QEMUIOVector tail_qiov;
3409 struct iovec tail_iov;
3410 size_t tail_bytes;
3411 bool waited;
3413 mark_request_serialising(&req, align);
3414 waited = wait_serialising_requests(&req);
3415 assert(!waited || !use_local_qiov);
3417 tail_buf = qemu_blockalign(bs, align);
3418 tail_iov = (struct iovec) {
3419 .iov_base = tail_buf,
3420 .iov_len = align,
3422 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3424 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3425 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3426 align, &tail_qiov, 0);
3427 if (ret < 0) {
3428 goto fail;
3430 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3432 if (!use_local_qiov) {
3433 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3434 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3435 use_local_qiov = true;
3438 tail_bytes = (offset + bytes) & (align - 1);
3439 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3441 bytes = ROUND_UP(bytes, align);
3444 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3445 use_local_qiov ? &local_qiov : qiov,
3446 flags);
3448 fail:
3449 tracked_request_end(&req);
3451 if (use_local_qiov) {
3452 qemu_iovec_destroy(&local_qiov);
3454 qemu_vfree(head_buf);
3455 qemu_vfree(tail_buf);
3457 return ret;
3460 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3461 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3462 BdrvRequestFlags flags)
3464 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3465 return -EINVAL;
3468 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3469 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3472 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3473 int nb_sectors, QEMUIOVector *qiov)
3475 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3477 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3480 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3481 int64_t sector_num, int nb_sectors,
3482 BdrvRequestFlags flags)
3484 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3486 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3487 flags &= ~BDRV_REQ_MAY_UNMAP;
3490 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3491 BDRV_REQ_ZERO_WRITE | flags);
3495 * Truncate file to 'offset' bytes (needed only for file protocols)
3497 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3499 BlockDriver *drv = bs->drv;
3500 int ret;
3501 if (!drv)
3502 return -ENOMEDIUM;
3503 if (!drv->bdrv_truncate)
3504 return -ENOTSUP;
3505 if (bs->read_only)
3506 return -EACCES;
3508 ret = drv->bdrv_truncate(bs, offset);
3509 if (ret == 0) {
3510 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3511 bdrv_dev_resize_cb(bs);
3513 return ret;
3517 * Length of an allocated file in bytes. Sparse files are counted by actual
3518 * allocated space. Return < 0 if error or unknown.
3520 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3522 BlockDriver *drv = bs->drv;
3523 if (!drv) {
3524 return -ENOMEDIUM;
3526 if (drv->bdrv_get_allocated_file_size) {
3527 return drv->bdrv_get_allocated_file_size(bs);
3529 if (bs->file) {
3530 return bdrv_get_allocated_file_size(bs->file);
3532 return -ENOTSUP;
3536 * Return number of sectors on success, -errno on error.
3538 int64_t bdrv_nb_sectors(BlockDriverState *bs)
3540 BlockDriver *drv = bs->drv;
3542 if (!drv)
3543 return -ENOMEDIUM;
3545 if (drv->has_variable_length) {
3546 int ret = refresh_total_sectors(bs, bs->total_sectors);
3547 if (ret < 0) {
3548 return ret;
3551 return bs->total_sectors;
3555 * Return length in bytes on success, -errno on error.
3556 * The length is always a multiple of BDRV_SECTOR_SIZE.
3558 int64_t bdrv_getlength(BlockDriverState *bs)
3560 int64_t ret = bdrv_nb_sectors(bs);
3562 return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
3565 /* Return 0 as the number of sectors if no device is present or on error */
3566 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3568 int64_t nb_sectors = bdrv_nb_sectors(bs);
3570 *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
3573 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3574 BlockdevOnError on_write_error)
3576 bs->on_read_error = on_read_error;
3577 bs->on_write_error = on_write_error;
3580 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3582 return is_read ? bs->on_read_error : bs->on_write_error;
3585 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3587 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3589 switch (on_err) {
3590 case BLOCKDEV_ON_ERROR_ENOSPC:
3591 return (error == ENOSPC) ?
3592 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
3593 case BLOCKDEV_ON_ERROR_STOP:
3594 return BLOCK_ERROR_ACTION_STOP;
3595 case BLOCKDEV_ON_ERROR_REPORT:
3596 return BLOCK_ERROR_ACTION_REPORT;
3597 case BLOCKDEV_ON_ERROR_IGNORE:
3598 return BLOCK_ERROR_ACTION_IGNORE;
3599 default:
3600 abort();
3604 /* This is done by device models because, while the block layer knows
3605 * about the error, it does not know whether an operation comes from
3606 * the device or the block layer (from a job, for example).
3608 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3609 bool is_read, int error)
3611 assert(error >= 0);
3613 if (action == BLOCK_ERROR_ACTION_STOP) {
3614 /* First set the iostatus, so that "info block" returns an iostatus
3615 * that matches the events raised so far (an additional error iostatus
3616 * is fine, but not a lost one).
3618 bdrv_iostatus_set_err(bs, error);
3620 /* Then raise the request to stop the VM and the event.
3621 * qemu_system_vmstop_request_prepare has two effects. First,
3622 * it ensures that the STOP event always comes after the
3623 * BLOCK_IO_ERROR event. Second, it ensures that even if management
3624 * can observe the STOP event and do a "cont" before the STOP
3625 * event is issued, the VM will not stop. In this case, vm_start()
3626 * also ensures that the STOP/RESUME pair of events is emitted.
3628 qemu_system_vmstop_request_prepare();
3629 qapi_event_send_block_io_error(bdrv_get_device_name(bs),
3630 is_read ? IO_OPERATION_TYPE_READ :
3631 IO_OPERATION_TYPE_WRITE,
3632 action, &error_abort);
3633 qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
3634 } else {
3635 qapi_event_send_block_io_error(bdrv_get_device_name(bs),
3636 is_read ? IO_OPERATION_TYPE_READ :
3637 IO_OPERATION_TYPE_WRITE,
3638 action, &error_abort);
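/*
 * Illustrative usage sketch, not part of the original file: how a device
 * model typically pairs bdrv_get_error_action() with bdrv_error_action()
 * when a request fails.  'error' is a positive errno value here, as
 * bdrv_error_action() asserts.
 */
static bool example_handle_rw_error(BlockDriverState *bs, bool is_read,
                                    int error)
{
    BlockErrorAction action = bdrv_get_error_action(bs, is_read, error);

    bdrv_error_action(bs, action, is_read, error);
    /* tell the caller whether to keep the request around for a retry
     * after the VM is resumed */
    return action == BLOCK_ERROR_ACTION_STOP;
}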
3642 int bdrv_is_read_only(BlockDriverState *bs)
3644 return bs->read_only;
3647 int bdrv_is_sg(BlockDriverState *bs)
3649 return bs->sg;
3652 int bdrv_enable_write_cache(BlockDriverState *bs)
3654 return bs->enable_write_cache;
3657 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3659 bs->enable_write_cache = wce;
3661 /* so a reopen() will preserve wce */
3662 if (wce) {
3663 bs->open_flags |= BDRV_O_CACHE_WB;
3664 } else {
3665 bs->open_flags &= ~BDRV_O_CACHE_WB;
3669 int bdrv_is_encrypted(BlockDriverState *bs)
3671 if (bs->backing_hd && bs->backing_hd->encrypted)
3672 return 1;
3673 return bs->encrypted;
3676 int bdrv_key_required(BlockDriverState *bs)
3678 BlockDriverState *backing_hd = bs->backing_hd;
3680 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3681 return 1;
3682 return (bs->encrypted && !bs->valid_key);
3685 int bdrv_set_key(BlockDriverState *bs, const char *key)
3687 int ret;
3688 if (bs->backing_hd && bs->backing_hd->encrypted) {
3689 ret = bdrv_set_key(bs->backing_hd, key);
3690 if (ret < 0)
3691 return ret;
3692 if (!bs->encrypted)
3693 return 0;
3695 if (!bs->encrypted) {
3696 return -EINVAL;
3697 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3698 return -ENOMEDIUM;
3700 ret = bs->drv->bdrv_set_key(bs, key);
3701 if (ret < 0) {
3702 bs->valid_key = 0;
3703 } else if (!bs->valid_key) {
3704 bs->valid_key = 1;
3705 /* call the change callback now, we skipped it on open */
3706 bdrv_dev_change_media_cb(bs, true);
3708 return ret;
3711 const char *bdrv_get_format_name(BlockDriverState *bs)
3713 return bs->drv ? bs->drv->format_name : NULL;
3716 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3717 void *opaque)
3719 BlockDriver *drv;
3720 int count = 0;
3721 const char **formats = NULL;
3723 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3724 if (drv->format_name) {
3725 bool found = false;
3726 int i = count;
3727 while (formats && i && !found) {
3728 found = !strcmp(formats[--i], drv->format_name);
3731 if (!found) {
3732 formats = g_realloc(formats, (count + 1) * sizeof(char *));
3733 formats[count++] = drv->format_name;
3734 it(opaque, drv->format_name);
3738 g_free(formats);
3741 /* Find the block backend whose device name matches 'name' */
3742 BlockDriverState *bdrv_find(const char *name)
3744 BlockDriverState *bs;
3746 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3747 if (!strcmp(name, bs->device_name)) {
3748 return bs;
3751 return NULL;
3754 /* Find a node in the BDS graph by its node name */
3755 BlockDriverState *bdrv_find_node(const char *node_name)
3757 BlockDriverState *bs;
3759 assert(node_name);
3761 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3762 if (!strcmp(node_name, bs->node_name)) {
3763 return bs;
3766 return NULL;
3769 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3770 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3772 BlockDeviceInfoList *list, *entry;
3773 BlockDriverState *bs;
3775 list = NULL;
3776 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3777 entry = g_malloc0(sizeof(*entry));
3778 entry->value = bdrv_block_device_info(bs);
3779 entry->next = list;
3780 list = entry;
3783 return list;
3786 BlockDriverState *bdrv_lookup_bs(const char *device,
3787 const char *node_name,
3788 Error **errp)
3790 BlockDriverState *bs = NULL;
3792 if (device) {
3793 bs = bdrv_find(device);
3795 if (bs) {
3796 return bs;
3800 if (node_name) {
3801 bs = bdrv_find_node(node_name);
3803 if (bs) {
3804 return bs;
3808 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3809 device ? device : "",
3810 node_name ? node_name : "");
3811 return NULL;
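/*
 * Illustrative usage sketch, not part of the original file: QMP commands
 * that accept a single "reference" string can resolve it as either a
 * device name or a node name with one call.
 */
static BlockDriverState *example_resolve_blockref(const char *ref,
                                                  Error **errp)
{
    return bdrv_lookup_bs(ref, ref, errp);
}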
3814 /* If 'base' is in the same chain as 'top', return true. Otherwise,
3815 * return false. If either argument is NULL, return false. */
3816 bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
3818 while (top && top != base) {
3819 top = top->backing_hd;
3822 return top != NULL;
3825 BlockDriverState *bdrv_next(BlockDriverState *bs)
3827 if (!bs) {
3828 return QTAILQ_FIRST(&bdrv_states);
3830 return QTAILQ_NEXT(bs, device_list);
3833 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3835 BlockDriverState *bs;
3837 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3838 it(opaque, bs);
3842 const char *bdrv_get_device_name(BlockDriverState *bs)
3844 return bs->device_name;
3847 int bdrv_get_flags(BlockDriverState *bs)
3849 return bs->open_flags;
3852 int bdrv_flush_all(void)
3854 BlockDriverState *bs;
3855 int result = 0;
3857 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3858 AioContext *aio_context = bdrv_get_aio_context(bs);
3859 int ret;
3861 aio_context_acquire(aio_context);
3862 ret = bdrv_flush(bs);
3863 if (ret < 0 && !result) {
3864 result = ret;
3866 aio_context_release(aio_context);
3869 return result;
3872 int bdrv_has_zero_init_1(BlockDriverState *bs)
3874 return 1;
3877 int bdrv_has_zero_init(BlockDriverState *bs)
3879 assert(bs->drv);
3881 /* If bs is a copy-on-write image, it is initialized to
3882 the contents of the base image, which may not be zeroes. */
3883 if (bs->backing_hd) {
3884 return 0;
3886 if (bs->drv->bdrv_has_zero_init) {
3887 return bs->drv->bdrv_has_zero_init(bs);
3890 /* safe default */
3891 return 0;
3894 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3896 BlockDriverInfo bdi;
3898 if (bs->backing_hd) {
3899 return false;
3902 if (bdrv_get_info(bs, &bdi) == 0) {
3903 return bdi.unallocated_blocks_are_zero;
3906 return false;
3909 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3911 BlockDriverInfo bdi;
3913 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3914 return false;
3917 if (bdrv_get_info(bs, &bdi) == 0) {
3918 return bdi.can_write_zeroes_with_unmap;
3921 return false;
3924 typedef struct BdrvCoGetBlockStatusData {
3925 BlockDriverState *bs;
3926 BlockDriverState *base;
3927 int64_t sector_num;
3928 int nb_sectors;
3929 int *pnum;
3930 int64_t ret;
3931 bool done;
3932 } BdrvCoGetBlockStatusData;
3935 * Returns true iff the specified sector is present in the disk image. Drivers
3936 * not implementing the functionality are assumed to not support backing files,
3937 * hence all their sectors are reported as allocated.
3939 * If 'sector_num' is beyond the end of the disk image the return value is 0
3940 * and 'pnum' is set to 0.
3942 * 'pnum' is set to the number of sectors (including and immediately following
3943 * the specified sector) that are known to be in the same
3944 * allocated/unallocated state.
3946 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3947 * beyond the end of the disk image it will be clamped.
3949 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3950 int64_t sector_num,
3951 int nb_sectors, int *pnum)
3953 int64_t total_sectors;
3954 int64_t n;
3955 int64_t ret, ret2;
3957 total_sectors = bdrv_nb_sectors(bs);
3958 if (total_sectors < 0) {
3959 return total_sectors;
3962 if (sector_num >= total_sectors) {
3963 *pnum = 0;
3964 return 0;
3967 n = total_sectors - sector_num;
3968 if (n < nb_sectors) {
3969 nb_sectors = n;
3972 if (!bs->drv->bdrv_co_get_block_status) {
3973 *pnum = nb_sectors;
3974 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
3975 if (bs->drv->protocol_name) {
3976 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3978 return ret;
3981 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3982 if (ret < 0) {
3983 *pnum = 0;
3984 return ret;
3987 if (ret & BDRV_BLOCK_RAW) {
3988 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3989 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3990 *pnum, pnum);
3993 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
3994 ret |= BDRV_BLOCK_ALLOCATED;
3997 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3998 if (bdrv_unallocated_blocks_are_zero(bs)) {
3999 ret |= BDRV_BLOCK_ZERO;
4000 } else if (bs->backing_hd) {
4001 BlockDriverState *bs2 = bs->backing_hd;
4002 int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
4003 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
4004 ret |= BDRV_BLOCK_ZERO;
4009 if (bs->file &&
4010 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
4011 (ret & BDRV_BLOCK_OFFSET_VALID)) {
4012 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4013 *pnum, pnum);
4014 if (ret2 >= 0) {
4015 /* Ignore errors. This is just providing extra information; it
4016 * is useful but not necessary.
4018 ret |= (ret2 & BDRV_BLOCK_ZERO);
4022 return ret;
4025 /* Coroutine wrapper for bdrv_get_block_status() */
4026 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4028 BdrvCoGetBlockStatusData *data = opaque;
4029 BlockDriverState *bs = data->bs;
4031 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4032 data->pnum);
4033 data->done = true;
4037 * Synchronous wrapper around bdrv_co_get_block_status().
4039 * See bdrv_co_get_block_status() for details.
4041 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4042 int nb_sectors, int *pnum)
4044 Coroutine *co;
4045 BdrvCoGetBlockStatusData data = {
4046 .bs = bs,
4047 .sector_num = sector_num,
4048 .nb_sectors = nb_sectors,
4049 .pnum = pnum,
4050 .done = false,
4053 if (qemu_in_coroutine()) {
4054 /* Fast-path if already in coroutine context */
4055 bdrv_get_block_status_co_entry(&data);
4056 } else {
4057 AioContext *aio_context = bdrv_get_aio_context(bs);
4059 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4060 qemu_coroutine_enter(co, &data);
4061 while (!data.done) {
4062 aio_poll(aio_context, true);
4065 return data.ret;
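/*
 * Illustrative usage sketch, not part of the original file: walking an
 * image's allocation map with the synchronous wrapper, one BDRV_BLOCK_*
 * extent at a time.  The chunk size of 1024 sectors is arbitrary.
 */
static void example_dump_block_status(BlockDriverState *bs)
{
    int64_t total = bdrv_nb_sectors(bs);
    int64_t sector_num = 0;
    int n;

    while (sector_num < total) {
        int64_t ret = bdrv_get_block_status(bs, sector_num, 1024, &n);
        if (ret < 0 || n == 0) {
            break;
        }
        fprintf(stderr, "%" PRId64 "+%d:%s%s\n", sector_num, n,
                (ret & BDRV_BLOCK_DATA) ? " data" : "",
                (ret & BDRV_BLOCK_ZERO) ? " zero" : "");
        sector_num += n;
    }
}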
4068 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4069 int nb_sectors, int *pnum)
4071 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4072 if (ret < 0) {
4073 return ret;
4075 return !!(ret & BDRV_BLOCK_ALLOCATED);
4079 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4081 * Return true if the given sector is allocated in any image between
4082 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4083 * sector is allocated in any image of the chain. Return false otherwise.
4085 * 'pnum' is set to the number of sectors (including and immediately following
4086 * the specified sector) that are known to be in the same
4087 * allocated/unallocated state.
4090 int bdrv_is_allocated_above(BlockDriverState *top,
4091 BlockDriverState *base,
4092 int64_t sector_num,
4093 int nb_sectors, int *pnum)
4095 BlockDriverState *intermediate;
4096 int ret, n = nb_sectors;
4098 intermediate = top;
4099 while (intermediate && intermediate != base) {
4100 int pnum_inter;
4101 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4102 &pnum_inter);
4103 if (ret < 0) {
4104 return ret;
4105 } else if (ret) {
4106 *pnum = pnum_inter;
4107 return 1;
4111 * [sector_num, nb_sectors] is unallocated on top but intermediate
4112 * might have
4114 * [sector_num+x, nb_sectors] allocated.
4116 if (n > pnum_inter &&
4117 (intermediate == top ||
4118 sector_num + pnum_inter < intermediate->total_sectors)) {
4119 n = pnum_inter;
4122 intermediate = intermediate->backing_hd;
4125 *pnum = n;
4126 return 0;
4129 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4131 if (bs->backing_hd && bs->backing_hd->encrypted)
4132 return bs->backing_file;
4133 else if (bs->encrypted)
4134 return bs->filename;
4135 else
4136 return NULL;
4139 void bdrv_get_backing_filename(BlockDriverState *bs,
4140 char *filename, int filename_size)
4142 pstrcpy(filename, filename_size, bs->backing_file);
4145 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4146 const uint8_t *buf, int nb_sectors)
4148 BlockDriver *drv = bs->drv;
4149 if (!drv)
4150 return -ENOMEDIUM;
4151 if (!drv->bdrv_write_compressed)
4152 return -ENOTSUP;
4153 if (bdrv_check_request(bs, sector_num, nb_sectors))
4154 return -EIO;
4156 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4158 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4161 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4163 BlockDriver *drv = bs->drv;
4164 if (!drv)
4165 return -ENOMEDIUM;
4166 if (!drv->bdrv_get_info)
4167 return -ENOTSUP;
4168 memset(bdi, 0, sizeof(*bdi));
4169 return drv->bdrv_get_info(bs, bdi);
4172 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4174 BlockDriver *drv = bs->drv;
4175 if (drv && drv->bdrv_get_specific_info) {
4176 return drv->bdrv_get_specific_info(bs);
4178 return NULL;
4181 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4182 int64_t pos, int size)
4184 QEMUIOVector qiov;
4185 struct iovec iov = {
4186 .iov_base = (void *) buf,
4187 .iov_len = size,
4190 qemu_iovec_init_external(&qiov, &iov, 1);
4191 return bdrv_writev_vmstate(bs, &qiov, pos);
4194 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4196 BlockDriver *drv = bs->drv;
4198 if (!drv) {
4199 return -ENOMEDIUM;
4200 } else if (drv->bdrv_save_vmstate) {
4201 return drv->bdrv_save_vmstate(bs, qiov, pos);
4202 } else if (bs->file) {
4203 return bdrv_writev_vmstate(bs->file, qiov, pos);
4206 return -ENOTSUP;
4209 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4210 int64_t pos, int size)
4212 BlockDriver *drv = bs->drv;
4213 if (!drv)
4214 return -ENOMEDIUM;
4215 if (drv->bdrv_load_vmstate)
4216 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4217 if (bs->file)
4218 return bdrv_load_vmstate(bs->file, buf, pos, size);
4219 return -ENOTSUP;
4222 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4224 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4225 return;
4228 bs->drv->bdrv_debug_event(bs, event);
4231 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4232 const char *tag)
4234 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4235 bs = bs->file;
4238 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4239 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4242 return -ENOTSUP;
4245 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4247 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4248 bs = bs->file;
4251 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4252 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4255 return -ENOTSUP;
4258 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4260 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4261 bs = bs->file;
4264 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4265 return bs->drv->bdrv_debug_resume(bs, tag);
4268 return -ENOTSUP;
4271 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4273 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4274 bs = bs->file;
4277 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4278 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4281 return false;
4284 int bdrv_is_snapshot(BlockDriverState *bs)
4286 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4289 /* backing_file can be relative, absolute, or a protocol. If it is
4290 * relative, it must be relative to the chain. So, passing in bs->filename
4291 * from a BDS as backing_file should not be done, as that may be relative to
4292 * the CWD rather than the chain. */
4293 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4294 const char *backing_file)
4296 char *filename_full = NULL;
4297 char *backing_file_full = NULL;
4298 char *filename_tmp = NULL;
4299 int is_protocol = 0;
4300 BlockDriverState *curr_bs = NULL;
4301 BlockDriverState *retval = NULL;
4303 if (!bs || !bs->drv || !backing_file) {
4304 return NULL;
4307 filename_full = g_malloc(PATH_MAX);
4308 backing_file_full = g_malloc(PATH_MAX);
4309 filename_tmp = g_malloc(PATH_MAX);
4311 is_protocol = path_has_protocol(backing_file);
4313 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4315 /* If either of the filename paths is actually a protocol, then
4316 * compare unmodified paths; otherwise make paths relative */
4317 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4318 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4319 retval = curr_bs->backing_hd;
4320 break;
4322 } else {
4323 /* If not an absolute filename path, make it relative to the current
4324 * image's filename path */
4325 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4326 backing_file);
4328 /* We are going to compare absolute pathnames */
4329 if (!realpath(filename_tmp, filename_full)) {
4330 continue;
4333 /* We need to make sure the backing filename we are comparing against
4334 * is relative to the current image filename (or absolute) */
4335 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4336 curr_bs->backing_file);
4338 if (!realpath(filename_tmp, backing_file_full)) {
4339 continue;
4342 if (strcmp(backing_file_full, filename_full) == 0) {
4343 retval = curr_bs->backing_hd;
4344 break;
4349 g_free(filename_full);
4350 g_free(backing_file_full);
4351 g_free(filename_tmp);
4352 return retval;
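/*
 * Example (illustrative sketch, not called anywhere): resolving a backing
 * image from a name supplied by a management command.  Per the comment above
 * bdrv_find_backing_image(), the name must be the string stored in the
 * overlay's image header (relative to the chain, absolute, or a protocol),
 * never top->filename.
 */
static BlockDriverState *example_find_base(BlockDriverState *top,
                                           const char *backing_name)
{
    return bdrv_find_backing_image(top, backing_name);
}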
4355 int bdrv_get_backing_file_depth(BlockDriverState *bs)
4357 if (!bs->drv) {
4358 return 0;
4361 if (!bs->backing_hd) {
4362 return 0;
4365 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4368 /**************************************************************/
4369 /* async I/Os */
4371 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4372 QEMUIOVector *qiov, int nb_sectors,
4373 BlockDriverCompletionFunc *cb, void *opaque)
4375 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4377 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4378 cb, opaque, false);
4381 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4382 QEMUIOVector *qiov, int nb_sectors,
4383 BlockDriverCompletionFunc *cb, void *opaque)
4385 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4387 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4388 cb, opaque, true);
4391 BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4392 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4393 BlockDriverCompletionFunc *cb, void *opaque)
4395 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4397 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4398 BDRV_REQ_ZERO_WRITE | flags,
4399 cb, opaque, true);
4403 typedef struct MultiwriteCB {
4404 int error;
4405 int num_requests;
4406 int num_callbacks;
4407 struct {
4408 BlockDriverCompletionFunc *cb;
4409 void *opaque;
4410 QEMUIOVector *free_qiov;
4411 } callbacks[];
4412 } MultiwriteCB;
4414 static void multiwrite_user_cb(MultiwriteCB *mcb)
4416 int i;
4418 for (i = 0; i < mcb->num_callbacks; i++) {
4419 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4420 if (mcb->callbacks[i].free_qiov) {
4421 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4423 g_free(mcb->callbacks[i].free_qiov);
4427 static void multiwrite_cb(void *opaque, int ret)
4429 MultiwriteCB *mcb = opaque;
4431 trace_multiwrite_cb(mcb, ret);
4433 if (ret < 0 && !mcb->error) {
4434 mcb->error = ret;
4437 mcb->num_requests--;
4438 if (mcb->num_requests == 0) {
4439 multiwrite_user_cb(mcb);
4440 g_free(mcb);
4444 static int multiwrite_req_compare(const void *a, const void *b)
4446 const BlockRequest *req1 = a, *req2 = b;
4449 * Note that we can't simply subtract req2->sector from req1->sector
4450 * here as that could overflow the return value.
4452 if (req1->sector > req2->sector) {
4453 return 1;
4454 } else if (req1->sector < req2->sector) {
4455 return -1;
4456 } else {
4457 return 0;
4462 * Takes a bunch of requests and tries to merge them. Returns the number of
4463 * requests that remain after merging.
4465 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4466 int num_reqs, MultiwriteCB *mcb)
4468 int i, outidx;
4470 // Sort requests by start sector
4471 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4473 // Check if adjacent requests touch the same clusters. If so, combine them,
4474 // filling up gaps with zero sectors.
4475 outidx = 0;
4476 for (i = 1; i < num_reqs; i++) {
4477 int merge = 0;
4478 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4480 // Handle exactly sequential writes and overlapping writes.
4481 if (reqs[i].sector <= oldreq_last) {
4482 merge = 1;
4485 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4486 merge = 0;
4489 if (merge) {
4490 size_t size;
4491 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4492 qemu_iovec_init(qiov,
4493 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4495 // Add the first request to the merged one. If the requests are
4496 // overlapping, drop the last sectors of the first request.
4497 size = (reqs[i].sector - reqs[outidx].sector) << 9;
4498 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4500 // We should not need to add any zeros between the two requests
4501 assert(reqs[i].sector <= oldreq_last);
4503 // Add the second request
4504 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4506 reqs[outidx].nb_sectors = qiov->size >> 9;
4507 reqs[outidx].qiov = qiov;
4509 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4510 } else {
4511 outidx++;
4512 reqs[outidx].sector = reqs[i].sector;
4513 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4514 reqs[outidx].qiov = reqs[i].qiov;
4518 return outidx + 1;
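/*
 * Worked example (sketch): two single-iovec writes covering sectors [0, 8)
 * and [8, 16) are sorted, found mergable (reqs[1].sector == oldreq_last),
 * and combined: size = (8 - 0) << 9 keeps all of the first qiov, the second
 * qiov is concatenated after it, nb_sectors becomes 16, and the function
 * returns 1.
 */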
4522 * Submit multiple AIO write requests at once.
4524 * On success, the function returns 0 and all requests in the reqs array have
4525 * been submitted. On error, this function returns -1, and the requests may or
4526 * may not have been submitted yet. In particular, the callback will be called
4527 * for some of the requests and not for others. The caller must check the error
4528 * field of each BlockRequest to know which callbacks to wait for (if
4529 * error != 0, no callback will be called).
4531 * The implementation may modify the contents of the reqs array, e.g. to merge
4532 * requests. However, the fields opaque and error are left unmodified as they
4533 * are used to signal failure for a single request to the caller.
4535 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4537 MultiwriteCB *mcb;
4538 int i;
4540 /* don't submit writes if we don't have a medium */
4541 if (bs->drv == NULL) {
4542 for (i = 0; i < num_reqs; i++) {
4543 reqs[i].error = -ENOMEDIUM;
4545 return -1;
4548 if (num_reqs == 0) {
4549 return 0;
4552 // Create MultiwriteCB structure
4553 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4554 mcb->num_requests = 0;
4555 mcb->num_callbacks = num_reqs;
4557 for (i = 0; i < num_reqs; i++) {
4558 mcb->callbacks[i].cb = reqs[i].cb;
4559 mcb->callbacks[i].opaque = reqs[i].opaque;
4562 // Check for mergable requests
4563 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4565 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4567 /* Run the aio requests. */
4568 mcb->num_requests = num_reqs;
4569 for (i = 0; i < num_reqs; i++) {
4570 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4571 reqs[i].nb_sectors, reqs[i].flags,
4572 multiwrite_cb, mcb,
4573 true);
4576 return 0;
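/*
 * Example (illustrative sketch): submitting a batch of writes.  The callback
 * and the pre-filled sector/nb_sectors/qiov fields are assumptions; the error
 * handling follows the contract documented above bdrv_aio_multiwrite().
 */
static void example_batch_cb(void *opaque, int ret)
{
    /* runs once per original request that was actually submitted */
}

static int example_submit_batch(BlockDriverState *bs, BlockRequest *reqs,
                                int num_reqs)
{
    int i;

    for (i = 0; i < num_reqs; i++) {
        reqs[i].cb = example_batch_cb;
        reqs[i].opaque = NULL;
        reqs[i].error = 0;
    }
    if (bdrv_aio_multiwrite(bs, reqs, num_reqs) < 0) {
        /* wait only for callbacks of requests whose error field is 0 */
        return -1;
    }
    return 0;
}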
4579 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
4581 acb->aiocb_info->cancel(acb);
4584 /**************************************************************/
4585 /* async block device emulation */
4587 typedef struct BlockDriverAIOCBSync {
4588 BlockDriverAIOCB common;
4589 QEMUBH *bh;
4590 int ret;
4591 /* vector translation state */
4592 QEMUIOVector *qiov;
4593 uint8_t *bounce;
4594 int is_write;
4595 } BlockDriverAIOCBSync;
4597 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
4599 BlockDriverAIOCBSync *acb =
4600 container_of(blockacb, BlockDriverAIOCBSync, common);
4601 qemu_bh_delete(acb->bh);
4602 acb->bh = NULL;
4603 qemu_aio_release(acb);
4606 static const AIOCBInfo bdrv_em_aiocb_info = {
4607 .aiocb_size = sizeof(BlockDriverAIOCBSync),
4608 .cancel = bdrv_aio_cancel_em,
4611 static void bdrv_aio_bh_cb(void *opaque)
4613 BlockDriverAIOCBSync *acb = opaque;
4615 if (!acb->is_write)
4616 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4617 qemu_vfree(acb->bounce);
4618 acb->common.cb(acb->common.opaque, acb->ret);
4619 qemu_bh_delete(acb->bh);
4620 acb->bh = NULL;
4621 qemu_aio_release(acb);
4624 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4625 int64_t sector_num,
4626 QEMUIOVector *qiov,
4627 int nb_sectors,
4628 BlockDriverCompletionFunc *cb,
4629 void *opaque,
4630 int is_write)
4633 BlockDriverAIOCBSync *acb;
4635 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4636 acb->is_write = is_write;
4637 acb->qiov = qiov;
4638 acb->bounce = qemu_blockalign(bs, qiov->size);
4639 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
4641 if (is_write) {
4642 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4643 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4644 } else {
4645 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4648 qemu_bh_schedule(acb->bh);
4650 return &acb->common;
4653 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4654 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4655 BlockDriverCompletionFunc *cb, void *opaque)
4657 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4660 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4661 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4662 BlockDriverCompletionFunc *cb, void *opaque)
4664 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4668 typedef struct BlockDriverAIOCBCoroutine {
4669 BlockDriverAIOCB common;
4670 BlockRequest req;
4671 bool is_write;
4672 bool *done;
4673 QEMUBH* bh;
4674 } BlockDriverAIOCBCoroutine;
4676 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
4678 AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
4679 BlockDriverAIOCBCoroutine *acb =
4680 container_of(blockacb, BlockDriverAIOCBCoroutine, common);
4681 bool done = false;
4683 acb->done = &done;
4684 while (!done) {
4685 aio_poll(aio_context, true);
4689 static const AIOCBInfo bdrv_em_co_aiocb_info = {
4690 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
4691 .cancel = bdrv_aio_co_cancel_em,
4694 static void bdrv_co_em_bh(void *opaque)
4696 BlockDriverAIOCBCoroutine *acb = opaque;
4698 acb->common.cb(acb->common.opaque, acb->req.error);
4700 if (acb->done) {
4701 *acb->done = true;
4704 qemu_bh_delete(acb->bh);
4705 qemu_aio_release(acb);
4708 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4709 static void coroutine_fn bdrv_co_do_rw(void *opaque)
4711 BlockDriverAIOCBCoroutine *acb = opaque;
4712 BlockDriverState *bs = acb->common.bs;
4714 if (!acb->is_write) {
4715 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4716 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4717 } else {
4718 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4719 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4722 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4723 qemu_bh_schedule(acb->bh);
4726 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4727 int64_t sector_num,
4728 QEMUIOVector *qiov,
4729 int nb_sectors,
4730 BdrvRequestFlags flags,
4731 BlockDriverCompletionFunc *cb,
4732 void *opaque,
4733 bool is_write)
4735 Coroutine *co;
4736 BlockDriverAIOCBCoroutine *acb;
4738 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4739 acb->req.sector = sector_num;
4740 acb->req.nb_sectors = nb_sectors;
4741 acb->req.qiov = qiov;
4742 acb->req.flags = flags;
4743 acb->is_write = is_write;
4744 acb->done = NULL;
4746 co = qemu_coroutine_create(bdrv_co_do_rw);
4747 qemu_coroutine_enter(co, acb);
4749 return &acb->common;
4752 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4754 BlockDriverAIOCBCoroutine *acb = opaque;
4755 BlockDriverState *bs = acb->common.bs;
4757 acb->req.error = bdrv_co_flush(bs);
4758 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4759 qemu_bh_schedule(acb->bh);
4762 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4763 BlockDriverCompletionFunc *cb, void *opaque)
4765 trace_bdrv_aio_flush(bs, opaque);
4767 Coroutine *co;
4768 BlockDriverAIOCBCoroutine *acb;
4770 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4771 acb->done = NULL;
4773 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4774 qemu_coroutine_enter(co, acb);
4776 return &acb->common;
4779 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4781 BlockDriverAIOCBCoroutine *acb = opaque;
4782 BlockDriverState *bs = acb->common.bs;
4784 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4785 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4786 qemu_bh_schedule(acb->bh);
4789 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4790 int64_t sector_num, int nb_sectors,
4791 BlockDriverCompletionFunc *cb, void *opaque)
4793 Coroutine *co;
4794 BlockDriverAIOCBCoroutine *acb;
4796 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4798 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4799 acb->req.sector = sector_num;
4800 acb->req.nb_sectors = nb_sectors;
4801 acb->done = NULL;
4802 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4803 qemu_coroutine_enter(co, acb);
4805 return &acb->common;
4808 void bdrv_init(void)
4810 module_call_init(MODULE_INIT_BLOCK);
4813 void bdrv_init_with_whitelist(void)
4815 use_bdrv_whitelist = 1;
4816 bdrv_init();
4819 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
4820 BlockDriverCompletionFunc *cb, void *opaque)
4822 BlockDriverAIOCB *acb;
4824 acb = g_slice_alloc(aiocb_info->aiocb_size);
4825 acb->aiocb_info = aiocb_info;
4826 acb->bs = bs;
4827 acb->cb = cb;
4828 acb->opaque = opaque;
4829 return acb;
4832 void qemu_aio_release(void *p)
4834 BlockDriverAIOCB *acb = p;
4835 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
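/*
 * Example (illustrative sketch): a driver-private AIOCB allocated through
 * qemu_aio_get().  ExampleAIOCB and its cancel implementation are
 * assumptions, mirroring bdrv_em_aiocb_info above.
 */
typedef struct ExampleAIOCB {
    BlockDriverAIOCB common;
    int in_flight;
} ExampleAIOCB;

static void example_aio_cancel(BlockDriverAIOCB *blockacb)
{
    ExampleAIOCB *acb = container_of(blockacb, ExampleAIOCB, common);

    /* a real driver would tear down acb->in_flight work here */
    qemu_aio_release(acb);
}

static const AIOCBInfo example_aiocb_info = {
    .aiocb_size = sizeof(ExampleAIOCB),
    .cancel     = example_aio_cancel,
};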
4838 /**************************************************************/
4839 /* Coroutine block device emulation */
4841 typedef struct CoroutineIOCompletion {
4842 Coroutine *coroutine;
4843 int ret;
4844 } CoroutineIOCompletion;
4846 static void bdrv_co_io_em_complete(void *opaque, int ret)
4848 CoroutineIOCompletion *co = opaque;
4850 co->ret = ret;
4851 qemu_coroutine_enter(co->coroutine, NULL);
4854 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4855 int nb_sectors, QEMUIOVector *iov,
4856 bool is_write)
4858 CoroutineIOCompletion co = {
4859 .coroutine = qemu_coroutine_self(),
4861 BlockDriverAIOCB *acb;
4863 if (is_write) {
4864 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4865 bdrv_co_io_em_complete, &co);
4866 } else {
4867 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4868 bdrv_co_io_em_complete, &co);
4871 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
4872 if (!acb) {
4873 return -EIO;
4875 qemu_coroutine_yield();
4877 return co.ret;
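/*
 * Example (illustrative sketch): the same yield/complete pattern, wrapping a
 * driver's AIO flush callback in a coroutine.  Assumes the caller has checked
 * that bs->drv->bdrv_aio_flush is non-NULL; bdrv_co_flush() below contains
 * the real equivalent of this branch.
 */
static int coroutine_fn example_co_flush_em(BlockDriverState *bs)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();
    return co.ret;
}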
4880 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4881 int64_t sector_num, int nb_sectors,
4882 QEMUIOVector *iov)
4884 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4887 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4888 int64_t sector_num, int nb_sectors,
4889 QEMUIOVector *iov)
4891 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4894 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
4896 RwCo *rwco = opaque;
4898 rwco->ret = bdrv_co_flush(rwco->bs);
4901 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4903 int ret;
4905 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
4906 return 0;
4909 /* Write back cached data to the OS even with cache=unsafe */
4910 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
4911 if (bs->drv->bdrv_co_flush_to_os) {
4912 ret = bs->drv->bdrv_co_flush_to_os(bs);
4913 if (ret < 0) {
4914 return ret;
4918 /* But don't actually force it to the disk with cache=unsafe */
4919 if (bs->open_flags & BDRV_O_NO_FLUSH) {
4920 goto flush_parent;
4923 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
4924 if (bs->drv->bdrv_co_flush_to_disk) {
4925 ret = bs->drv->bdrv_co_flush_to_disk(bs);
4926 } else if (bs->drv->bdrv_aio_flush) {
4927 BlockDriverAIOCB *acb;
4928 CoroutineIOCompletion co = {
4929 .coroutine = qemu_coroutine_self(),
4932 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4933 if (acb == NULL) {
4934 ret = -EIO;
4935 } else {
4936 qemu_coroutine_yield();
4937 ret = co.ret;
4939 } else {
4941 * Some block drivers always operate in either writethrough or unsafe
4942 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4943 * know how the server works (because the behaviour is hardcoded or
4944 * depends on server-side configuration), so we can't ensure that
4945 * everything is safe on disk. Returning an error doesn't work because
4946 * that would break guests even if the server operates in writethrough
4947 * mode.
4949 * Let's hope the user knows what they're doing.
4951 ret = 0;
4953 if (ret < 0) {
4954 return ret;
4957 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
4958 * in the case of cache=unsafe, so there are no useless flushes.
4960 flush_parent:
4961 return bdrv_co_flush(bs->file);
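/*
 * Worked example (sketch): with cache=unsafe, BDRV_O_NO_FLUSH is set on both
 * the format and protocol layers, so a flush writes cached data to the OS
 * via bdrv_co_flush_to_os() but skips the expensive flush-to-disk step at
 * every layer through the goto above.
 */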
4964 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
4966 Error *local_err = NULL;
4967 int ret;
4969 if (!bs->drv) {
4970 return;
4973 if (bs->drv->bdrv_invalidate_cache) {
4974 bs->drv->bdrv_invalidate_cache(bs, &local_err);
4975 } else if (bs->file) {
4976 bdrv_invalidate_cache(bs->file, &local_err);
4978 if (local_err) {
4979 error_propagate(errp, local_err);
4980 return;
4983 ret = refresh_total_sectors(bs, bs->total_sectors);
4984 if (ret < 0) {
4985 error_setg_errno(errp, -ret, "Could not refresh total sector count");
4986 return;
4990 void bdrv_invalidate_cache_all(Error **errp)
4992 BlockDriverState *bs;
4993 Error *local_err = NULL;
4995 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
4996 AioContext *aio_context = bdrv_get_aio_context(bs);
4998 aio_context_acquire(aio_context);
4999 bdrv_invalidate_cache(bs, &local_err);
5000 aio_context_release(aio_context);
5001 if (local_err) {
5002 error_propagate(errp, local_err);
5003 return;
5008 void bdrv_clear_incoming_migration_all(void)
5010 BlockDriverState *bs;
5012 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5013 AioContext *aio_context = bdrv_get_aio_context(bs);
5015 aio_context_acquire(aio_context);
5016 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
5017 aio_context_release(aio_context);
5021 int bdrv_flush(BlockDriverState *bs)
5023 Coroutine *co;
5024 RwCo rwco = {
5025 .bs = bs,
5026 .ret = NOT_DONE,
5029 if (qemu_in_coroutine()) {
5030 /* Fast-path if already in coroutine context */
5031 bdrv_flush_co_entry(&rwco);
5032 } else {
5033 AioContext *aio_context = bdrv_get_aio_context(bs);
5035 co = qemu_coroutine_create(bdrv_flush_co_entry);
5036 qemu_coroutine_enter(co, &rwco);
5037 while (rwco.ret == NOT_DONE) {
5038 aio_poll(aio_context, true);
5042 return rwco.ret;
5045 typedef struct DiscardCo {
5046 BlockDriverState *bs;
5047 int64_t sector_num;
5048 int nb_sectors;
5049 int ret;
5050 } DiscardCo;
5051 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5053 DiscardCo *rwco = opaque;
5055 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5058 /* if no limit is specified in the BlockLimits, use a default
5059 * of 32768 512-byte sectors (16 MiB) per request.
5061 #define MAX_DISCARD_DEFAULT 32768
5063 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5064 int nb_sectors)
5066 int max_discard;
5068 if (!bs->drv) {
5069 return -ENOMEDIUM;
5070 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5071 return -EIO;
5072 } else if (bs->read_only) {
5073 return -EROFS;
5076 bdrv_reset_dirty(bs, sector_num, nb_sectors);
5078 /* Do nothing if disabled. */
5079 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5080 return 0;
5083 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5084 return 0;
5087 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5088 while (nb_sectors > 0) {
5089 int ret;
5090 int num = nb_sectors;
5092 /* align request */
5093 if (bs->bl.discard_alignment &&
5094 num >= bs->bl.discard_alignment &&
5095 sector_num % bs->bl.discard_alignment) {
5096 if (num > bs->bl.discard_alignment) {
5097 num = bs->bl.discard_alignment;
5099 num -= sector_num % bs->bl.discard_alignment;
5102 /* limit request size */
5103 if (num > max_discard) {
5104 num = max_discard;
5107 if (bs->drv->bdrv_co_discard) {
5108 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5109 } else {
5110 BlockDriverAIOCB *acb;
5111 CoroutineIOCompletion co = {
5112 .coroutine = qemu_coroutine_self(),
5115 acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
5116 bdrv_co_io_em_complete, &co);
5117 if (acb == NULL) {
5118 return -EIO;
5119 } else {
5120 qemu_coroutine_yield();
5121 ret = co.ret;
5124 if (ret && ret != -ENOTSUP) {
5125 return ret;
5128 sector_num += num;
5129 nb_sectors -= num;
5131 return 0;
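/*
 * Worked example (sketch): with bl.discard_alignment == 8 and
 * bl.max_discard == 0 (so MAX_DISCARD_DEFAULT applies), a discard of sectors
 * [5, 70000) is issued as [5, 8) to realign, then [8, 32776) and
 * [32776, 65544) at the 32768-sector limit, and finally [65544, 70000).
 */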
5134 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5136 Coroutine *co;
5137 DiscardCo rwco = {
5138 .bs = bs,
5139 .sector_num = sector_num,
5140 .nb_sectors = nb_sectors,
5141 .ret = NOT_DONE,
5144 if (qemu_in_coroutine()) {
5145 /* Fast-path if already in coroutine context */
5146 bdrv_discard_co_entry(&rwco);
5147 } else {
5148 AioContext *aio_context = bdrv_get_aio_context(bs);
5150 co = qemu_coroutine_create(bdrv_discard_co_entry);
5151 qemu_coroutine_enter(co, &rwco);
5152 while (rwco.ret == NOT_DONE) {
5153 aio_poll(aio_context, true);
5157 return rwco.ret;
5160 /**************************************************************/
5161 /* removable device support */
5164 * Return TRUE if the media is present
5166 int bdrv_is_inserted(BlockDriverState *bs)
5168 BlockDriver *drv = bs->drv;
5170 if (!drv)
5171 return 0;
5172 if (!drv->bdrv_is_inserted)
5173 return 1;
5174 return drv->bdrv_is_inserted(bs);
5178 * Return whether the media changed since the last call to this
5179 * function, or -ENOTSUP if we don't know. Most drivers don't know.
5181 int bdrv_media_changed(BlockDriverState *bs)
5183 BlockDriver *drv = bs->drv;
5185 if (drv && drv->bdrv_media_changed) {
5186 return drv->bdrv_media_changed(bs);
5188 return -ENOTSUP;
5192 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5194 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
5196 BlockDriver *drv = bs->drv;
5198 if (drv && drv->bdrv_eject) {
5199 drv->bdrv_eject(bs, eject_flag);
5202 if (bs->device_name[0] != '\0') {
5203 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
5204 eject_flag, &error_abort);
5209 * Lock or unlock the media (if it is locked, the user won't be able
5210 * to eject it manually).
5212 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
5214 BlockDriver *drv = bs->drv;
5216 trace_bdrv_lock_medium(bs, locked);
5218 if (drv && drv->bdrv_lock_medium) {
5219 drv->bdrv_lock_medium(bs, locked);
5223 /* needed for generic scsi interface */
5225 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5227 BlockDriver *drv = bs->drv;
5229 if (drv && drv->bdrv_ioctl)
5230 return drv->bdrv_ioctl(bs, req, buf);
5231 return -ENOTSUP;
5234 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5235 unsigned long int req, void *buf,
5236 BlockDriverCompletionFunc *cb, void *opaque)
5238 BlockDriver *drv = bs->drv;
5240 if (drv && drv->bdrv_aio_ioctl)
5241 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5242 return NULL;
5245 void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5247 bs->guest_block_size = align;
5250 void *qemu_blockalign(BlockDriverState *bs, size_t size)
5252 return qemu_memalign(bdrv_opt_mem_align(bs), size);
5256 * Check if all memory in this vector is aligned to bdrv_opt_mem_align(bs).
5258 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5260 int i;
5261 size_t alignment = bdrv_opt_mem_align(bs);
5263 for (i = 0; i < qiov->niov; i++) {
5264 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5265 return false;
5267 if (qiov->iov[i].iov_len % alignment) {
5268 return false;
5272 return true;
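/*
 * Worked example (sketch): with bdrv_opt_mem_align(bs) == 4096, a qiov whose
 * single 8192-byte iovec starts on a page boundary passes; moving the base
 * up by 512 bytes fails the base check and forces the caller to use a
 * bounce buffer.
 */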
5275 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5276 Error **errp)
5278 int64_t bitmap_size;
5279 BdrvDirtyBitmap *bitmap;
5281 assert((granularity & (granularity - 1)) == 0);
5283 granularity >>= BDRV_SECTOR_BITS;
5284 assert(granularity);
5285 bitmap_size = bdrv_nb_sectors(bs);
5286 if (bitmap_size < 0) {
5287 error_setg_errno(errp, -bitmap_size, "could not get length of device");
5288 errno = -bitmap_size;
5289 return NULL;
5291 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
5292 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5293 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5294 return bitmap;
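/*
 * Worked example (sketch): granularity == 65536 bytes.  After the shift it
 * is 128 sectors, and hbitmap_alloc() is called with ffs(128) - 1 == 7, so
 * one bitmap bit tracks 2^7 = 128 sectors, i.e. 64 KiB of guest data.
 */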
5297 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5299 BdrvDirtyBitmap *bm, *next;
5300 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5301 if (bm == bitmap) {
5302 QLIST_REMOVE(bitmap, list);
5303 hbitmap_free(bitmap->bitmap);
5304 g_free(bitmap);
5305 return;
5310 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5312 BdrvDirtyBitmap *bm;
5313 BlockDirtyInfoList *list = NULL;
5314 BlockDirtyInfoList **plist = &list;
5316 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5317 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
5318 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
5319 info->count = bdrv_get_dirty_count(bs, bm);
5320 info->granularity =
5321 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5322 entry->value = info;
5323 *plist = entry;
5324 plist = &entry->next;
5327 return list;
5330 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5332 if (bitmap) {
5333 return hbitmap_get(bitmap->bitmap, sector);
5334 } else {
5335 return 0;
5339 void bdrv_dirty_iter_init(BlockDriverState *bs,
5340 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5342 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5345 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5346 int nr_sectors)
5348 BdrvDirtyBitmap *bitmap;
5349 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5350 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5354 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5356 BdrvDirtyBitmap *bitmap;
5357 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5358 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5362 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5364 return hbitmap_count(bitmap->bitmap);
5367 /* Get a reference to bs */
5368 void bdrv_ref(BlockDriverState *bs)
5370 bs->refcnt++;
5373 /* Release a previously grabbed reference to bs.
5374 * If, after releasing, the reference count is zero, the BlockDriverState
5375 * is deleted. */
5376 void bdrv_unref(BlockDriverState *bs)
5378 assert(bs->refcnt > 0);
5379 if (--bs->refcnt == 0) {
5380 bdrv_delete(bs);
5384 struct BdrvOpBlocker {
5385 Error *reason;
5386 QLIST_ENTRY(BdrvOpBlocker) list;
5389 bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5391 BdrvOpBlocker *blocker;
5392 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5393 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5394 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5395 if (errp) {
5396 error_setg(errp, "Device '%s' is busy: %s",
5397 bs->device_name, error_get_pretty(blocker->reason));
5399 return true;
5401 return false;
5404 void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5406 BdrvOpBlocker *blocker;
5407 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5409 blocker = g_malloc0(sizeof(BdrvOpBlocker));
5410 blocker->reason = reason;
5411 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5414 void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5416 BdrvOpBlocker *blocker, *next;
5417 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5418 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5419 if (blocker->reason == reason) {
5420 QLIST_REMOVE(blocker, list);
5421 g_free(blocker);
5426 void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5428 int i;
5429 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5430 bdrv_op_block(bs, i, reason);
5434 void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5436 int i;
5437 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5438 bdrv_op_unblock(bs, i, reason);
5442 bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5444 int i;
5446 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5447 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5448 return false;
5451 return true;
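/*
 * Example (illustrative sketch): blocking all operations on a BDS for the
 * lifetime of a job.  The Error object serves as the blocker's identity, so
 * the same pointer must be passed to the unblock call.
 */
static void example_block_for_job(BlockDriverState *bs)
{
    Error *reason = NULL;

    error_setg(&reason, "block device is in use by an example job");
    bdrv_op_block_all(bs, reason);
    /* ... while blocked, bdrv_op_is_blocked() reports this reason ... */
    bdrv_op_unblock_all(bs, reason);
    error_free(reason);
}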
5454 void bdrv_iostatus_enable(BlockDriverState *bs)
5456 bs->iostatus_enabled = true;
5457 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5460 /* The I/O status is only enabled if the drive explicitly
5461 * enables it _and_ the VM is configured to stop on errors */
5462 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5464 return (bs->iostatus_enabled &&
5465 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5466 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5467 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5470 void bdrv_iostatus_disable(BlockDriverState *bs)
5472 bs->iostatus_enabled = false;
5475 void bdrv_iostatus_reset(BlockDriverState *bs)
5477 if (bdrv_iostatus_is_enabled(bs)) {
5478 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5479 if (bs->job) {
5480 block_job_iostatus_reset(bs->job);
5485 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5487 assert(bdrv_iostatus_is_enabled(bs));
5488 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5489 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5490 BLOCK_DEVICE_IO_STATUS_FAILED;
5494 void
5495 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
5496 enum BlockAcctType type)
5498 assert(type < BDRV_MAX_IOTYPE);
5500 cookie->bytes = bytes;
5501 cookie->start_time_ns = get_clock();
5502 cookie->type = type;
5505 void
5506 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
5508 assert(cookie->type < BDRV_MAX_IOTYPE);
5510 bs->nr_bytes[cookie->type] += cookie->bytes;
5511 bs->nr_ops[cookie->type]++;
5512 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
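/*
 * Example (illustrative sketch): accounting a guest read.  BDRV_ACCT_READ is
 * the read member of enum BlockAcctType; the read itself is elided.
 */
static void example_account_read(BlockDriverState *bs, int64_t bytes)
{
    BlockAcctCookie cookie;

    bdrv_acct_start(bs, &cookie, bytes, BDRV_ACCT_READ);
    /* ... perform the read ... */
    bdrv_acct_done(bs, &cookie);
}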
5515 void bdrv_img_create(const char *filename, const char *fmt,
5516 const char *base_filename, const char *base_fmt,
5517 char *options, uint64_t img_size, int flags,
5518 Error **errp, bool quiet)
5520 QemuOptsList *create_opts = NULL;
5521 QemuOpts *opts = NULL;
5522 const char *backing_fmt, *backing_file;
5523 int64_t size;
5524 BlockDriver *drv, *proto_drv;
5525 BlockDriver *backing_drv = NULL;
5526 Error *local_err = NULL;
5527 int ret = 0;
5529 /* Find driver and parse its options */
5530 drv = bdrv_find_format(fmt);
5531 if (!drv) {
5532 error_setg(errp, "Unknown file format '%s'", fmt);
5533 return;
5536 proto_drv = bdrv_find_protocol(filename, true);
5537 if (!proto_drv) {
5538 error_setg(errp, "Unknown protocol '%s'", filename);
5539 return;
5542 create_opts = qemu_opts_append(create_opts, drv->create_opts);
5543 create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
5545 /* Create parameter list with default values */
5546 opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
5547 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);
5549 /* Parse -o options */
5550 if (options) {
5551 if (qemu_opts_do_parse(opts, options, NULL) != 0) {
5552 error_setg(errp, "Invalid options for file format '%s'", fmt);
5553 goto out;
5557 if (base_filename) {
5558 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
5559 error_setg(errp, "Backing file not supported for file format '%s'",
5560 fmt);
5561 goto out;
5565 if (base_fmt) {
5566 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
5567 error_setg(errp, "Backing file format not supported for file "
5568 "format '%s'", fmt);
5569 goto out;
5573 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5574 if (backing_file) {
5575 if (!strcmp(filename, backing_file)) {
5576 error_setg(errp, "Error: Trying to create an image with the "
5577 "same filename as the backing file");
5578 goto out;
5582 backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5583 if (backing_fmt) {
5584 backing_drv = bdrv_find_format(backing_fmt);
5585 if (!backing_drv) {
5586 error_setg(errp, "Unknown backing file format '%s'",
5587 backing_fmt);
5588 goto out;
5592 // The size for the image must always be specified, with one exception:
5593 // If we are using a backing file, we can obtain the size from there
5594 size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5595 if (size == -1) {
5596 if (backing_file) {
5597 BlockDriverState *bs;
5598 int64_t size;
5599 int back_flags;
5601 /* backing files always opened read-only */
5602 back_flags =
5603 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5605 bs = NULL;
5606 ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
5607 backing_drv, &local_err);
5608 if (ret < 0) {
5609 error_setg_errno(errp, -ret, "Could not open '%s': %s",
5610 backing_file,
5611 error_get_pretty(local_err));
5612 error_free(local_err);
5613 local_err = NULL;
5614 goto out;
5616 size = bdrv_getlength(bs);
5617 if (size < 0) {
5618 error_setg_errno(errp, -size, "Could not get size of '%s'",
5619 backing_file);
5620 bdrv_unref(bs);
5621 goto out;
5624 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
5626 bdrv_unref(bs);
5627 } else {
5628 error_setg(errp, "Image creation needs a size parameter");
5629 goto out;
5633 if (!quiet) {
5634 printf("Formatting '%s', fmt=%s ", filename, fmt);
5635 qemu_opts_print(opts);
5636 puts("");
5639 ret = bdrv_create(drv, filename, opts, &local_err);
5641 if (ret == -EFBIG) {
5642 /* This is generally a better message than whatever the driver would
5643 * deliver (especially because of the cluster_size_hint), since that
5644 * is most probably not much different from "image too large". */
5645 const char *cluster_size_hint = "";
5646 if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
5647 cluster_size_hint = " (try using a larger cluster size)";
5649 error_setg(errp, "The image size is too large for file format '%s'"
5650 "%s", fmt, cluster_size_hint);
5651 error_free(local_err);
5652 local_err = NULL;
5655 out:
5656 qemu_opts_del(opts);
5657 qemu_opts_free(create_opts);
5658 if (local_err) {
5659 error_propagate(errp, local_err);
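/*
 * Example (illustrative sketch): creating a 1 GiB qcow2 overlay with a
 * backing file, roughly what "qemu-img create -f qcow2 -b base.img
 * overlay.qcow2 1G" does.  The filenames are assumptions.
 */
static void example_img_create(Error **errp)
{
    bdrv_img_create("overlay.qcow2", "qcow2", "base.img", NULL,
                    NULL, 1073741824ULL, 0, errp, true);
}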
5663 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5665 return bs->aio_context;
5668 void bdrv_detach_aio_context(BlockDriverState *bs)
5670 if (!bs->drv) {
5671 return;
5674 if (bs->io_limits_enabled) {
5675 throttle_detach_aio_context(&bs->throttle_state);
5677 if (bs->drv->bdrv_detach_aio_context) {
5678 bs->drv->bdrv_detach_aio_context(bs);
5680 if (bs->file) {
5681 bdrv_detach_aio_context(bs->file);
5683 if (bs->backing_hd) {
5684 bdrv_detach_aio_context(bs->backing_hd);
5687 bs->aio_context = NULL;
5690 void bdrv_attach_aio_context(BlockDriverState *bs,
5691 AioContext *new_context)
5693 if (!bs->drv) {
5694 return;
5697 bs->aio_context = new_context;
5699 if (bs->backing_hd) {
5700 bdrv_attach_aio_context(bs->backing_hd, new_context);
5702 if (bs->file) {
5703 bdrv_attach_aio_context(bs->file, new_context);
5705 if (bs->drv->bdrv_attach_aio_context) {
5706 bs->drv->bdrv_attach_aio_context(bs, new_context);
5708 if (bs->io_limits_enabled) {
5709 throttle_attach_aio_context(&bs->throttle_state, new_context);
5713 void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
5715 bdrv_drain_all(); /* ensure there are no in-flight requests */
5717 bdrv_detach_aio_context(bs);
5719 /* This function executes in the old AioContext so acquire the new one in
5720 * case it runs in a different thread.
5722 aio_context_acquire(new_context);
5723 bdrv_attach_aio_context(bs, new_context);
5724 aio_context_release(new_context);
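/*
 * Example (illustrative sketch): moving a BDS back to the main loop's
 * AioContext, e.g. when detaching it from a dataplane thread.
 */
static void example_move_to_main_loop(BlockDriverState *bs)
{
    bdrv_set_aio_context(bs, qemu_get_aio_context());
}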
5727 void bdrv_add_before_write_notifier(BlockDriverState *bs,
5728 NotifierWithReturn *notifier)
5730 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5733 int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts)
5735 if (!bs->drv->bdrv_amend_options) {
5736 return -ENOTSUP;
5738 return bs->drv->bdrv_amend_options(bs, opts);
5741 /* This function will be called by the bdrv_recurse_is_first_non_filter method
5742 * of block filters and by bdrv_is_first_non_filter.
5743 * It is used to test if the given bs is the candidate, or to recurse further
5744 * into the node graph.
5746 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5747 BlockDriverState *candidate)
5749 /* return false if basic checks fail */
5750 if (!bs || !bs->drv) {
5751 return false;
5754 /* the code reached a non-filter block driver -> check if the bs is
5755 * the same as the candidate. It's the recursion termination condition.
5757 if (!bs->drv->is_filter) {
5758 return bs == candidate;
5760 /* Down this path the driver is a block filter driver */
5762 /* If the block filter recursion method is defined use it to recurse down
5763 * the node graph.
5765 if (bs->drv->bdrv_recurse_is_first_non_filter) {
5766 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5769 /* the driver is a block filter but doesn't allow recursion -> return false
5771 return false;
5774 /* This function checks if the candidate is the first non-filter bs down its
5775 * bs chain. Since we don't have pointers to parents, it explores all bs chains
5776 * from the top. Some filters can choose not to pass down the recursion.
5778 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5780 BlockDriverState *bs;
5782 /* walk down the bs forest recursively */
5783 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5784 bool perm;
5786 /* try to recurse in this top level bs */
5787 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
5789 /* candidate is the first non filter */
5790 if (perm) {
5791 return true;
5795 return false;
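/*
 * Example (illustrative sketch): how a simple single-child filter driver
 * could implement the bdrv_recurse_is_first_non_filter hook referenced
 * above, delegating the recursion to its bs->file child.
 */
static bool example_filter_recurse_is_first_non_filter(BlockDriverState *bs,
                                                       BlockDriverState *candidate)
{
    return bdrv_recurse_is_first_non_filter(bs->file, candidate);
}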
5798 BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
5800 BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
5801 if (!to_replace_bs) {
5802 error_setg(errp, "Node name '%s' not found", node_name);
5803 return NULL;
5806 if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
5807 return NULL;
5810 /* We don't want an arbitrary node of the BDS chain to be replaced, only the
5811 * topmost non-filter, in order to prevent data corruption.
5812 * Another benefit is that this test excludes backing files, which are
5813 * blocked by the backing blockers.
5815 if (!bdrv_is_first_non_filter(to_replace_bs)) {
5816 error_setg(errp, "Only top most non filter can be replaced");
5817 return NULL;
5820 return to_replace_bs;
5823 void bdrv_io_plug(BlockDriverState *bs)
5825 BlockDriver *drv = bs->drv;
5826 if (drv && drv->bdrv_io_plug) {
5827 drv->bdrv_io_plug(bs);
5828 } else if (bs->file) {
5829 bdrv_io_plug(bs->file);
5833 void bdrv_io_unplug(BlockDriverState *bs)
5835 BlockDriver *drv = bs->drv;
5836 if (drv && drv->bdrv_io_unplug) {
5837 drv->bdrv_io_unplug(bs);
5838 } else if (bs->file) {
5839 bdrv_io_unplug(bs->file);
5843 void bdrv_flush_io_queue(BlockDriverState *bs)
5845 BlockDriver *drv = bs->drv;
5846 if (drv && drv->bdrv_flush_io_queue) {
5847 drv->bdrv_flush_io_queue(bs);
5848 } else if (bs->file) {
5849 bdrv_flush_io_queue(bs->file);