/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}
/* This function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
/* This function makes an I/O request wait if needed
 *
 * @nb_sectors: the number of sectors of the I/O
 * @is_write:   is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     int nb_sectors,
                                     bool is_write)
{
    /* must this I/O wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state,
                     is_write,
                     nb_sectors * BDRV_SECTOR_SIZE);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
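/*
 * Call-site sketch (illustrative only): a request path with throttling
 * enabled is expected to pass through the interceptor before reaching the
 * driver, roughly:
 *
 *     if (bs->io_limits_enabled) {
 *         bdrv_io_limits_intercept(bs, nb_sectors, false);   // read path
 *     }
 *     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
 *
 * The coroutine blocks in qemu_co_queue_wait() until a throttle timer or a
 * completing request wakes it via qemu_co_enter_next()/qemu_co_queue_next().
 */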
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
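/*
 * For example, "nbd:0.0.0.0:1234" and "file:/tmp/disk.img" have a protocol
 * prefix, while "/tmp/disk.img" and "relative/name.qcow2" do not; on
 * Windows, "c:\disk.img" is deliberately treated as a drive letter rather
 * than a "c" protocol.
 */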
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
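/*
 * Worked example: with base_path = "/images/base.qcow2" and
 * filename = "snap.qcow2", everything up to and including the last '/' of
 * base_path is kept, giving dest = "/images/snap.qcow2".  An absolute
 * filename such as "/other/snap.qcow2" is copied unchanged.
 */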
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}
int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (error_is_set(&cco.err)) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
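/*
 * Usage sketch (illustrative, error handling elided): creating a 1 GiB
 * image with a given driver would look roughly like:
 *
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1 * 1024 * 1024 * 1024);
 *     ret = bdrv_create(drv, "test.qcow2", opts, &err);
 *     free_option_parameters(opts);
 *
 * bdrv_open() itself uses this pattern for the snapshot=on overlay below.
 */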
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
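/*
 * On POSIX hosts this yields a name like "/tmp/vl.a1b2c3" (mkstemp fills in
 * the XXXXXX); the file is created empty and closed, so a caller such as the
 * snapshot=on path in bdrv_open() can hand the name to bdrv_create().
 */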
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
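/*
 * Summary of the mapping implemented above:
 *
 *   mode          BDRV_O_NOCACHE  BDRV_O_CACHE_WB  BDRV_O_NO_FLUSH
 *   writethrough        -                -                -
 *   writeback           -                x                -
 *   none/off            x                x                -
 *   directsync          x                -                -
 *   unsafe              -                x                x
 */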
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    /* bdrv_open() is being called with a protocol used directly as drv. That
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->buffer_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (!bs->read_only && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        assert(bs->filename[0] != '\0');
        unlink(bs->filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   QDict *options, int flags, Error **errp)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs = bdrv_new("");
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    } else if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        ret = -EINVAL;
        goto fail;
    }

    ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    bdrv_unref(bs);
    return ret;
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling this function.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    bs->backing_hd = bdrv_new("");

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT);

    ret = bdrv_open(bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, options,
                    back_flags, back_drv, &local_err);
    pstrcpy(bs->backing_file, sizeof(bs->backing_file),
            bs->backing_hd->file->filename);
    if (ret < 0) {
        bdrv_unref(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_propagate(errp, local_err);
        return ret;
    }
    return 0;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    QDict *file_options = NULL;
    const char *drvname;
    Error *local_err = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        char backing_filename[PATH_MAX];

        if (qdict_size(options) != 0) {
            error_setg(errp, "Can't use snapshot=on with driver-specific options");
            ret = -EINVAL;
            goto fail;
        }
        assert(filename != NULL);

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, NULL, 0, drv, &local_err);
        if (ret < 0) {
            bdrv_unref(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        /* Real path is meaningless for protocols */
        if (path_has_protocol(filename)) {
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        } else if (!realpath(filename, backing_filename)) {
            error_setg_errno(errp, errno, "Could not resolve path '%s'", filename);
            ret = -errno;
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(create_options, BLOCK_OPT_BACKING_FILE,
                             backing_filename);
        if (drv) {
            set_option_parameter(create_options, BLOCK_OPT_BACKING_FMT,
                                 drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    qdict_extract_subqdict(options, &file_options, "file.");

    ret = bdrv_file_open(&file, filename, file_options,
                         bdrv_open_flags(bs, flags | BDRV_O_UNMAP), &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
    }

    if (!drv) {
        ret = find_image_format(file, filename, &drv, &local_err);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (bs->file != file) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);

        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
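/*
 * Usage sketch (illustrative): toggling a device to read-write reuses the
 * queue machinery above, either through this wrapper or explicitly:
 *
 *     BlockReopenQueue *queue =
 *         bdrv_reopen_queue(NULL, bs, bs->open_flags | BDRV_O_RDWR);
 *     ret = bdrv_reopen_multiple(queue, &err);   // prepare/commit/abort
 *
 * bdrv_commit() below uses bdrv_reopen() in exactly this way to temporarily
 * make a read-only backing file writable.
 */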
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (bdrv_start_throttled_reqs(bs)) {
                busy = true;
            }
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
/* make a BlockDriverState anonymous by removing from bdrv_state list.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
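/*
 * Illustration (sketch only): external snapshots are built on bdrv_append().
 * Given a chain "base <- active", a caller opens the freshly created overlay
 * as an anonymous BDS and appends it:
 *
 *     bdrv_append(overlay_bs, active_bs);
 *     // chain is now "base <- active <- overlay"; the guest-visible device
 *     // fields stayed on the top layer throughout the swap
 *
 * Real callers also handle option parsing and error recovery.
 */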
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
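/*
 * Worked example: with a 64 KiB cluster size, c = 65536 / 512 = 128 sectors.
 * Rounding sector_num = 130, nb_sectors = 10 gives
 * cluster_sector_num = QEMU_ALIGN_DOWN(130, 128) = 128 and
 * cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128, i.e. the
 * request is widened to exactly the one cluster it touches.
 */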
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors) {
    /*        aaaa   bbbb */
    if (sector_num >= req->sector_num + req->nb_sectors) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->sector_num >= sector_num + nb_sectors) {
        return false;
    }
    return true;
}
static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}
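/*
 * For example, in the chain "base <- sn1 <- sn2 <- active",
 * bdrv_find_overlay(active, sn1) returns sn2, the layer whose backing file
 * is sn1; bdrv_find_overlay(active, active) returns NULL.
 */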
2128 typedef struct BlkIntermediateStates {
2129 BlockDriverState *bs;
2130 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2131 } BlkIntermediateStates;
2135 * Drops images above 'base' up to and including 'top', and sets the image
2136 * above 'top' to have base as its backing file.
2138 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2139 * information in 'bs' can be properly updated.
2141 * E.g., this will convert the following chain:
2142 * bottom <- base <- intermediate <- top <- active
2144 * to
2146 * bottom <- base <- active
2148 * It is allowed for bottom==base, in which case it converts:
2150 * base <- intermediate <- top <- active
2152 * to
2154 * base <- active
2156 * Error conditions:
2157 * if active == top, that is considered an error
2160 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2161 BlockDriverState *base)
2163 BlockDriverState *intermediate;
2164 BlockDriverState *base_bs = NULL;
2165 BlockDriverState *new_top_bs = NULL;
2166 BlkIntermediateStates *intermediate_state, *next;
2167 int ret = -EIO;
2169 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2170 QSIMPLEQ_INIT(&states_to_delete);
2172 if (!top->drv || !base->drv) {
2173 goto exit;
2176 new_top_bs = bdrv_find_overlay(active, top);
2178 if (new_top_bs == NULL) {
2179 /* we could not find the image above 'top', this is an error */
2180 goto exit;
2183 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2184 * to do, no intermediate images */
2185 if (new_top_bs->backing_hd == base) {
2186 ret = 0;
2187 goto exit;
2190 intermediate = top;
2192 /* now we will go down through the list, and add each BDS we find
2193 * into our deletion queue, until we hit the 'base'
2195 while (intermediate) {
2196 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2197 intermediate_state->bs = intermediate;
2198 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2200 if (intermediate->backing_hd == base) {
2201 base_bs = intermediate->backing_hd;
2202 break;
2204 intermediate = intermediate->backing_hd;
2206 if (base_bs == NULL) {
2207 /* Something went wrong; we did not end at the base. Safely
2208 * unravel everything, and exit with error */
2209 goto exit;
2212 /* success - we can delete the intermediate states, and link top->base */
2213 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2214 base_bs->drv ? base_bs->drv->format_name : "");
2215 if (ret) {
2216 goto exit;
2218 new_top_bs->backing_hd = base_bs;
2221 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2222 /* so that bdrv_close() does not recursively close the chain */
2223 intermediate_state->bs->backing_hd = NULL;
2224 bdrv_unref(intermediate_state->bs);
2226 ret = 0;
2228 exit:
2229 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2230 g_free(intermediate_state);
2232 return ret;
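/*
 * Illustrative sketch (hypothetical helper, fenced out with #if 0):
 * collapsing "base <- intermediate <- top <- active" into
 * "base <- active" with bdrv_drop_intermediate().
 */
#if 0
static int example_collapse_chain(BlockDriverState *active)
{
    BlockDriverState *base = bdrv_find_base(active);
    BlockDriverState *top = active->backing_hd; /* image just below active */

    if (!base || !top || top == base) {
        return -EINVAL;
    }
    /* On success, active->backing_hd == base and every intermediate
     * state has been unref'd. */
    return bdrv_drop_intermediate(active, top, base);
}
#endif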
2236 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2237 size_t size)
2239 int64_t len;
2241 if (!bdrv_is_inserted(bs))
2242 return -ENOMEDIUM;
2244 if (bs->growable)
2245 return 0;
2247 len = bdrv_getlength(bs);
2249 if (offset < 0)
2250 return -EIO;
2252 if ((offset > len) || (len - offset < size))
2253 return -EIO;
2255 return 0;
2258 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2259 int nb_sectors)
2261 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2262 nb_sectors * BDRV_SECTOR_SIZE);
2265 typedef struct RwCo {
2266 BlockDriverState *bs;
2267 int64_t sector_num;
2268 int nb_sectors;
2269 QEMUIOVector *qiov;
2270 bool is_write;
2271 int ret;
2272 BdrvRequestFlags flags;
2273 } RwCo;
2275 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2277 RwCo *rwco = opaque;
2279 if (!rwco->is_write) {
2280 rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
2281 rwco->nb_sectors, rwco->qiov,
2282 rwco->flags);
2283 } else {
2284 rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
2285 rwco->nb_sectors, rwco->qiov,
2286 rwco->flags);
2291 * Process a vectored synchronous request using coroutines
2293 static int bdrv_rwv_co(BlockDriverState *bs, int64_t sector_num,
2294 QEMUIOVector *qiov, bool is_write,
2295 BdrvRequestFlags flags)
2297 Coroutine *co;
2298 RwCo rwco = {
2299 .bs = bs,
2300 .sector_num = sector_num,
2301 .nb_sectors = qiov->size >> BDRV_SECTOR_BITS,
2302 .qiov = qiov,
2303 .is_write = is_write,
2304 .ret = NOT_DONE,
2305 .flags = flags,
2307 assert((qiov->size & (BDRV_SECTOR_SIZE - 1)) == 0);
2310 * In synchronous call context, while the vCPU is blocked, the throttling
2311 * timer will not fire; so I/O throttling has to be disabled here if it
2312 * has been enabled.
2314 if (bs->io_limits_enabled) {
2315 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2316 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2317 bdrv_io_limits_disable(bs);
2320 if (qemu_in_coroutine()) {
2321 /* Fast-path if already in coroutine context */
2322 bdrv_rw_co_entry(&rwco);
2323 } else {
2324 co = qemu_coroutine_create(bdrv_rw_co_entry);
2325 qemu_coroutine_enter(co, &rwco);
2326 while (rwco.ret == NOT_DONE) {
2327 qemu_aio_wait();
2330 return rwco.ret;
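/*
 * Minimal sketch of the synchronous-over-coroutine pattern used above:
 * run the entry function directly when already in coroutine context,
 * otherwise spawn a coroutine and pump the event loop until it stores
 * a result. Hypothetical example, fenced out with #if 0.
 */
#if 0
static void coroutine_fn example_co_entry(void *opaque)
{
    int *ret = opaque;

    *ret = 0; /* the actual coroutine work goes here */
}

static int example_sync_wrapper(void)
{
    int ret = NOT_DONE;

    if (qemu_in_coroutine()) {
        example_co_entry(&ret);
    } else {
        Coroutine *co = qemu_coroutine_create(example_co_entry);
        qemu_coroutine_enter(co, &ret);
        while (ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return ret;
}
#endif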
2334 * Process a synchronous request using coroutines
2336 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2337 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2339 QEMUIOVector qiov;
2340 struct iovec iov = {
2341 .iov_base = (void *)buf,
2342 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2345 qemu_iovec_init_external(&qiov, &iov, 1);
2346 return bdrv_rwv_co(bs, sector_num, &qiov, is_write, flags);
2349 /* return < 0 if error. See bdrv_write() for the return codes */
2350 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2351 uint8_t *buf, int nb_sectors)
2353 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2356 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2357 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2358 uint8_t *buf, int nb_sectors)
2360 bool enabled;
2361 int ret;
2363 enabled = bs->io_limits_enabled;
2364 bs->io_limits_enabled = false;
2365 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2366 bs->io_limits_enabled = enabled;
2367 return ret;
2370 /* Return < 0 if error. Important errors are:
2371 -EIO generic I/O error (may happen for all errors)
2372 -ENOMEDIUM No media inserted.
2373 -EINVAL Invalid sector number or nb_sectors
2374 -EACCES Trying to write a read-only device
2376 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2377 const uint8_t *buf, int nb_sectors)
2379 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2382 int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov)
2384 return bdrv_rwv_co(bs, sector_num, qiov, true, 0);
2387 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2389 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2390 BDRV_REQ_ZERO_WRITE);
2393 int bdrv_pread(BlockDriverState *bs, int64_t offset,
2394 void *buf, int count1)
2396 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
2397 int len, nb_sectors, count;
2398 int64_t sector_num;
2399 int ret;
2401 count = count1;
2402 /* first read to align to sector start */
2403 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
2404 if (len > count)
2405 len = count;
2406 sector_num = offset >> BDRV_SECTOR_BITS;
2407 if (len > 0) {
2408 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
2409 return ret;
2410 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
2411 count -= len;
2412 if (count == 0)
2413 return count1;
2414 sector_num++;
2415 buf += len;
2418 /* read the sectors "in place" */
2419 nb_sectors = count >> BDRV_SECTOR_BITS;
2420 if (nb_sectors > 0) {
2421 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
2422 return ret;
2423 sector_num += nb_sectors;
2424 len = nb_sectors << BDRV_SECTOR_BITS;
2425 buf += len;
2426 count -= len;
2429 /* add data from the last sector */
2430 if (count > 0) {
2431 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
2432 return ret;
2433 memcpy(buf, tmp_buf, count);
2435 return count1;
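/*
 * Illustrative sketch: byte-granular access via bdrv_pread(), which
 * performs the head/body/tail alignment handling above internally. The
 * 512-byte header size is an arbitrary example; fenced out with #if 0.
 */
#if 0
static int example_read_header(BlockDriverState *bs, void *header)
{
    /* returns the requested byte count on success, -errno on error */
    int ret = bdrv_pread(bs, 0, header, 512);

    return ret < 0 ? ret : 0;
}
#endif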
2438 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2440 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
2441 int len, nb_sectors, count;
2442 int64_t sector_num;
2443 int ret;
2445 count = qiov->size;
2447 /* first write to align to sector start */
2448 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
2449 if (len > count)
2450 len = count;
2451 sector_num = offset >> BDRV_SECTOR_BITS;
2452 if (len > 0) {
2453 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
2454 return ret;
2455 qemu_iovec_to_buf(qiov, 0, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)),
2456 len);
2457 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
2458 return ret;
2459 count -= len;
2460 if (count == 0)
2461 return qiov->size;
2462 sector_num++;
2465 /* write the sectors "in place" */
2466 nb_sectors = count >> BDRV_SECTOR_BITS;
2467 if (nb_sectors > 0) {
2468 QEMUIOVector qiov_inplace;
2470 qemu_iovec_init(&qiov_inplace, qiov->niov);
2471 qemu_iovec_concat(&qiov_inplace, qiov, len,
2472 nb_sectors << BDRV_SECTOR_BITS);
2473 ret = bdrv_writev(bs, sector_num, &qiov_inplace);
2474 qemu_iovec_destroy(&qiov_inplace);
2475 if (ret < 0) {
2476 return ret;
2479 sector_num += nb_sectors;
2480 len = nb_sectors << BDRV_SECTOR_BITS;
2481 count -= len;
2484 /* add data from the last sector */
2485 if (count > 0) {
2486 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
2487 return ret;
2488 qemu_iovec_to_buf(qiov, qiov->size - count, tmp_buf, count);
2489 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
2490 return ret;
2492 return qiov->size;
2495 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2496 const void *buf, int count1)
2498 QEMUIOVector qiov;
2499 struct iovec iov = {
2500 .iov_base = (void *) buf,
2501 .iov_len = count1,
2504 qemu_iovec_init_external(&qiov, &iov, 1);
2505 return bdrv_pwritev(bs, offset, &qiov);
2509 * Writes to the file and ensures that no writes are reordered across this
2510 * request (acts as a barrier)
2512 * Returns 0 on success, -errno in error cases.
2514 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2515 const void *buf, int count)
2517 int ret;
2519 ret = bdrv_pwrite(bs, offset, buf, count);
2520 if (ret < 0) {
2521 return ret;
2524 /* No flush needed for cache modes that already do it */
2525 if (bs->enable_write_cache) {
2526 bdrv_flush(bs);
2529 return 0;
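/*
 * Illustrative sketch of the barrier semantics: a header field update
 * that no later write may be reordered across. The offset, field and
 * cpu_to_be32() availability are assumptions; fenced out with #if 0.
 */
#if 0
static int example_update_header_field(BlockDriverState *bs, uint32_t value)
{
    uint32_t be_value = cpu_to_be32(value);

    /* offset 8 stands in for some on-disk header field */
    return bdrv_pwrite_sync(bs, 8, &be_value, sizeof(be_value));
}
#endif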
2532 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2533 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2535 /* Perform I/O through a temporary buffer so that users who scribble over
2536 * their read buffer while the operation is in progress do not end up
2537 * modifying the image file. This is critical for zero-copy guest I/O
2538 * where anything might happen inside guest memory.
2540 void *bounce_buffer;
2542 BlockDriver *drv = bs->drv;
2543 struct iovec iov;
2544 QEMUIOVector bounce_qiov;
2545 int64_t cluster_sector_num;
2546 int cluster_nb_sectors;
2547 size_t skip_bytes;
2548 int ret;
2550 /* Cover the entire cluster so no additional backing file I/O is required
2551 * when allocating a cluster in the image file.
2553 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2554 &cluster_sector_num, &cluster_nb_sectors);
2556 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2557 cluster_sector_num, cluster_nb_sectors);
2559 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2560 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
2561 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2563 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2564 &bounce_qiov);
2565 if (ret < 0) {
2566 goto err;
2569 if (drv->bdrv_co_write_zeroes &&
2570 buffer_is_zero(bounce_buffer, iov.iov_len)) {
2571 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
2572 cluster_nb_sectors);
2573 } else {
2574 /* This does not change the data on the disk, so it is not necessary
2575 * to flush even in cache=writethrough mode.
2577 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
2578 &bounce_qiov);
2581 if (ret < 0) {
2582 /* It might be okay to ignore write errors for guest requests. If this
2583 * is a deliberate copy-on-read then we don't want to ignore the error.
2584 * Simply report it in all cases.
2586 goto err;
2589 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
2590 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
2591 nb_sectors * BDRV_SECTOR_SIZE);
2593 err:
2594 qemu_vfree(bounce_buffer);
2595 return ret;
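/*
 * Worked example of the cluster rounding performed above, as a
 * stand-alone sketch (fenced out with #if 0): with hypothetical
 * 128-sector clusters, a request for sectors [130, 135) is widened to
 * the aligned range [128, 256).
 */
#if 0
static void example_round_to_clusters(int64_t sector_num, int nb_sectors,
                                      int64_t cluster_sectors,
                                      int64_t *cluster_sector_num,
                                      int *cluster_nb_sectors)
{
    int64_t start = sector_num / cluster_sectors * cluster_sectors;
    int64_t end = DIV_ROUND_UP(sector_num + nb_sectors, cluster_sectors) *
                  cluster_sectors;

    *cluster_sector_num = start;
    *cluster_nb_sectors = end - start;
}
#endif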
2599 * Handle a read request in coroutine context
2601 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
2602 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
2603 BdrvRequestFlags flags)
2605 BlockDriver *drv = bs->drv;
2606 BdrvTrackedRequest req;
2607 int ret;
2609 if (!drv) {
2610 return -ENOMEDIUM;
2612 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
2613 return -EIO;
2616 if (bs->copy_on_read) {
2617 flags |= BDRV_REQ_COPY_ON_READ;
2619 if (flags & BDRV_REQ_COPY_ON_READ) {
2620 bs->copy_on_read_in_flight++;
2623 if (bs->copy_on_read_in_flight) {
2624 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
2627 /* throttling disk I/O */
2628 if (bs->io_limits_enabled) {
2629 bdrv_io_limits_intercept(bs, nb_sectors, false);
2632 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
2634 if (flags & BDRV_REQ_COPY_ON_READ) {
2635 int pnum;
2637 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
2638 if (ret < 0) {
2639 goto out;
2642 if (!ret || pnum != nb_sectors) {
2643 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
2644 goto out;
2648 if (!(bs->zero_beyond_eof && bs->growable)) {
2649 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
2650 } else {
2651 /* Read zeros after EOF of growable BDSes */
2652 int64_t len, total_sectors, max_nb_sectors;
2654 len = bdrv_getlength(bs);
2655 if (len < 0) {
2656 ret = len;
2657 goto out;
2660 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
2661 max_nb_sectors = MAX(0, total_sectors - sector_num);
2662 if (max_nb_sectors > 0) {
2663 ret = drv->bdrv_co_readv(bs, sector_num,
2664 MIN(nb_sectors, max_nb_sectors), qiov);
2665 } else {
2666 ret = 0;
2669 /* Reading beyond end of file is supposed to produce zeroes */
2670 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
2671 uint64_t offset = MAX(0, total_sectors - sector_num);
2672 uint64_t bytes = (sector_num + nb_sectors - offset) *
2673 BDRV_SECTOR_SIZE;
2674 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
2678 out:
2679 tracked_request_end(&req);
2681 if (flags & BDRV_REQ_COPY_ON_READ) {
2682 bs->copy_on_read_in_flight--;
2685 return ret;
2688 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
2689 int nb_sectors, QEMUIOVector *qiov)
2691 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
2693 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
2696 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
2697 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2699 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
2701 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
2702 BDRV_REQ_COPY_ON_READ);
2705 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
2706 int64_t sector_num, int nb_sectors)
2708 BlockDriver *drv = bs->drv;
2709 QEMUIOVector qiov;
2710 struct iovec iov;
2711 int ret;
2713 /* TODO Emulate only part of misaligned requests instead of letting block
2714 * drivers return -ENOTSUP and emulate everything */
2716 /* First try the efficient write zeroes operation */
2717 if (drv->bdrv_co_write_zeroes) {
2718 ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
2719 if (ret != -ENOTSUP) {
2720 return ret;
2724 /* Fall back to bounce buffer if write zeroes is unsupported */
2725 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2726 iov.iov_base = qemu_blockalign(bs, iov.iov_len);
2727 memset(iov.iov_base, 0, iov.iov_len);
2728 qemu_iovec_init_external(&qiov, &iov, 1);
2730 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
2732 qemu_vfree(iov.iov_base);
2733 return ret;
2737 * Handle a write request in coroutine context
2739 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
2740 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
2741 BdrvRequestFlags flags)
2743 BlockDriver *drv = bs->drv;
2744 BdrvTrackedRequest req;
2745 int ret;
2747 if (!bs->drv) {
2748 return -ENOMEDIUM;
2750 if (bs->read_only) {
2751 return -EACCES;
2753 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
2754 return -EIO;
2757 if (bs->copy_on_read_in_flight) {
2758 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
2761 /* throttling disk I/O */
2762 if (bs->io_limits_enabled) {
2763 bdrv_io_limits_intercept(bs, nb_sectors, true);
2766 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
2768 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2770 if (ret < 0) {
2771 /* Do nothing; the write notifier decided to fail this request */
2772 } else if (flags & BDRV_REQ_ZERO_WRITE) {
2773 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
2774 } else {
2775 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
2778 if (ret == 0 && !bs->enable_write_cache) {
2779 ret = bdrv_co_flush(bs);
2782 if (bs->dirty_bitmap) {
2783 bdrv_set_dirty(bs, sector_num, nb_sectors);
2786 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
2787 bs->wr_highest_sector = sector_num + nb_sectors - 1;
2789 if (bs->growable && ret >= 0) {
2790 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
2793 tracked_request_end(&req);
2795 return ret;
2798 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
2799 int nb_sectors, QEMUIOVector *qiov)
2801 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
2803 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
2806 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
2807 int64_t sector_num, int nb_sectors)
2809 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
2811 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
2812 BDRV_REQ_ZERO_WRITE);
2816 * Truncate file to 'offset' bytes (needed only for file protocols)
2818 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
2820 BlockDriver *drv = bs->drv;
2821 int ret;
2822 if (!drv)
2823 return -ENOMEDIUM;
2824 if (!drv->bdrv_truncate)
2825 return -ENOTSUP;
2826 if (bs->read_only)
2827 return -EACCES;
2828 if (bdrv_in_use(bs))
2829 return -EBUSY;
2830 ret = drv->bdrv_truncate(bs, offset);
2831 if (ret == 0) {
2832 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
2833 bdrv_dev_resize_cb(bs);
2835 return ret;
2839 * Length of an allocated file in bytes. Sparse files are counted by actual
2840 * allocated space. Return < 0 if error or unknown.
2842 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
2844 BlockDriver *drv = bs->drv;
2845 if (!drv) {
2846 return -ENOMEDIUM;
2848 if (drv->bdrv_get_allocated_file_size) {
2849 return drv->bdrv_get_allocated_file_size(bs);
2851 if (bs->file) {
2852 return bdrv_get_allocated_file_size(bs->file);
2854 return -ENOTSUP;
2858 * Length of a file in bytes. Return < 0 if error or unknown.
2860 int64_t bdrv_getlength(BlockDriverState *bs)
2862 BlockDriver *drv = bs->drv;
2863 if (!drv)
2864 return -ENOMEDIUM;
2866 if (bdrv_dev_has_removable_media(bs)) {
2867 if (drv->bdrv_getlength) {
2868 return drv->bdrv_getlength(bs);
2871 return bs->total_sectors * BDRV_SECTOR_SIZE;
2874 /* return 0 as number of sectors if no device present or error */
2875 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
2877 int64_t length;
2878 length = bdrv_getlength(bs);
2879 if (length < 0)
2880 length = 0;
2881 else
2882 length = length >> BDRV_SECTOR_BITS;
2883 *nb_sectors_ptr = length;
2886 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
2887 BlockdevOnError on_write_error)
2889 bs->on_read_error = on_read_error;
2890 bs->on_write_error = on_write_error;
2893 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
2895 return is_read ? bs->on_read_error : bs->on_write_error;
2898 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
2900 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
2902 switch (on_err) {
2903 case BLOCKDEV_ON_ERROR_ENOSPC:
2904 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
2905 case BLOCKDEV_ON_ERROR_STOP:
2906 return BDRV_ACTION_STOP;
2907 case BLOCKDEV_ON_ERROR_REPORT:
2908 return BDRV_ACTION_REPORT;
2909 case BLOCKDEV_ON_ERROR_IGNORE:
2910 return BDRV_ACTION_IGNORE;
2911 default:
2912 abort();
2916 /* This is done by device models because, while the block layer knows
2917 * about the error, it does not know whether an operation comes from
2918 * the device or the block layer (from a job, for example).
2920 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
2921 bool is_read, int error)
2923 assert(error >= 0);
2924 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
2925 if (action == BDRV_ACTION_STOP) {
2926 vm_stop(RUN_STATE_IO_ERROR);
2927 bdrv_iostatus_set_err(bs, error);
2931 int bdrv_is_read_only(BlockDriverState *bs)
2933 return bs->read_only;
2936 int bdrv_is_sg(BlockDriverState *bs)
2938 return bs->sg;
2941 int bdrv_enable_write_cache(BlockDriverState *bs)
2943 return bs->enable_write_cache;
2946 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
2948 bs->enable_write_cache = wce;
2950 /* so a reopen() will preserve wce */
2951 if (wce) {
2952 bs->open_flags |= BDRV_O_CACHE_WB;
2953 } else {
2954 bs->open_flags &= ~BDRV_O_CACHE_WB;
2958 int bdrv_is_encrypted(BlockDriverState *bs)
2960 if (bs->backing_hd && bs->backing_hd->encrypted)
2961 return 1;
2962 return bs->encrypted;
2965 int bdrv_key_required(BlockDriverState *bs)
2967 BlockDriverState *backing_hd = bs->backing_hd;
2969 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
2970 return 1;
2971 return (bs->encrypted && !bs->valid_key);
2974 int bdrv_set_key(BlockDriverState *bs, const char *key)
2976 int ret;
2977 if (bs->backing_hd && bs->backing_hd->encrypted) {
2978 ret = bdrv_set_key(bs->backing_hd, key);
2979 if (ret < 0)
2980 return ret;
2981 if (!bs->encrypted)
2982 return 0;
2984 if (!bs->encrypted) {
2985 return -EINVAL;
2986 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
2987 return -ENOMEDIUM;
2989 ret = bs->drv->bdrv_set_key(bs, key);
2990 if (ret < 0) {
2991 bs->valid_key = 0;
2992 } else if (!bs->valid_key) {
2993 bs->valid_key = 1;
2994 /* call the change callback now, we skipped it on open */
2995 bdrv_dev_change_media_cb(bs, true);
2997 return ret;
3000 const char *bdrv_get_format_name(BlockDriverState *bs)
3002 return bs->drv ? bs->drv->format_name : NULL;
3005 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3006 void *opaque)
3008 BlockDriver *drv;
3010 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3011 it(opaque, drv->format_name);
3015 BlockDriverState *bdrv_find(const char *name)
3017 BlockDriverState *bs;
3019 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3020 if (!strcmp(name, bs->device_name)) {
3021 return bs;
3024 return NULL;
3027 BlockDriverState *bdrv_next(BlockDriverState *bs)
3029 if (!bs) {
3030 return QTAILQ_FIRST(&bdrv_states);
3032 return QTAILQ_NEXT(bs, list);
3035 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3037 BlockDriverState *bs;
3039 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3040 it(opaque, bs);
3044 const char *bdrv_get_device_name(BlockDriverState *bs)
3046 return bs->device_name;
3049 int bdrv_get_flags(BlockDriverState *bs)
3051 return bs->open_flags;
3054 int bdrv_flush_all(void)
3056 BlockDriverState *bs;
3057 int result = 0;
3059 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3060 int ret = bdrv_flush(bs);
3061 if (ret < 0 && !result) {
3062 result = ret;
3066 return result;
3069 int bdrv_has_zero_init_1(BlockDriverState *bs)
3071 return 1;
3074 int bdrv_has_zero_init(BlockDriverState *bs)
3076 assert(bs->drv);
3078 /* If BS is a copy on write image, it is initialized to
3079 the contents of the base image, which may not be zeroes. */
3080 if (bs->backing_hd) {
3081 return 0;
3083 if (bs->drv->bdrv_has_zero_init) {
3084 return bs->drv->bdrv_has_zero_init(bs);
3087 /* safe default */
3088 return 0;
3091 typedef struct BdrvCoGetBlockStatusData {
3092 BlockDriverState *bs;
3093 BlockDriverState *base;
3094 int64_t sector_num;
3095 int nb_sectors;
3096 int *pnum;
3097 int64_t ret;
3098 bool done;
3099 } BdrvCoGetBlockStatusData;
3102 * Returns true iff the specified sector is present in the disk image. Drivers
3103 * not implementing the functionality are assumed to not support backing files,
3104 * hence all their sectors are reported as allocated.
3106 * If 'sector_num' is beyond the end of the disk image the return value is 0
3107 * and 'pnum' is set to 0.
3109 * 'pnum' is set to the number of sectors (including and immediately following
3110 * the specified sector) that are known to be in the same
3111 * allocated/unallocated state.
3113 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3114 * beyond the end of the disk image it will be clamped.
3116 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3117 int64_t sector_num,
3118 int nb_sectors, int *pnum)
3120 int64_t length;
3121 int64_t n;
3122 int64_t ret, ret2;
3124 length = bdrv_getlength(bs);
3125 if (length < 0) {
3126 return length;
3129 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
3130 *pnum = 0;
3131 return 0;
3134 n = bs->total_sectors - sector_num;
3135 if (n < nb_sectors) {
3136 nb_sectors = n;
3139 if (!bs->drv->bdrv_co_get_block_status) {
3140 *pnum = nb_sectors;
3141 ret = BDRV_BLOCK_DATA;
3142 if (bs->drv->protocol_name) {
3143 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3145 return ret;
3148 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3149 if (ret < 0) {
3150 *pnum = 0;
3151 return ret;
3154 if (ret & BDRV_BLOCK_RAW) {
3155 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3156 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3157 *pnum, pnum);
3160 if (!(ret & BDRV_BLOCK_DATA)) {
3161 if (bdrv_has_zero_init(bs)) {
3162 ret |= BDRV_BLOCK_ZERO;
3163 } else if (bs->backing_hd) {
3164 BlockDriverState *bs2 = bs->backing_hd;
3165 int64_t length2 = bdrv_getlength(bs2);
3166 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3167 ret |= BDRV_BLOCK_ZERO;
3172 if (bs->file &&
3173 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3174 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3175 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3176 *pnum, pnum);
3177 if (ret2 >= 0) {
3178 /* Ignore errors. This is just providing extra information; it
3179 * is useful but not necessary.
3181 ret |= (ret2 & BDRV_BLOCK_ZERO);
3185 return ret;
3188 /* Coroutine wrapper for bdrv_get_block_status() */
3189 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
3191 BdrvCoGetBlockStatusData *data = opaque;
3192 BlockDriverState *bs = data->bs;
3194 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
3195 data->pnum);
3196 data->done = true;
3200 * Synchronous wrapper around bdrv_co_get_block_status().
3202 * See bdrv_co_get_block_status() for details.
3204 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
3205 int nb_sectors, int *pnum)
3207 Coroutine *co;
3208 BdrvCoGetBlockStatusData data = {
3209 .bs = bs,
3210 .sector_num = sector_num,
3211 .nb_sectors = nb_sectors,
3212 .pnum = pnum,
3213 .done = false,
3216 if (qemu_in_coroutine()) {
3217 /* Fast-path if already in coroutine context */
3218 bdrv_get_block_status_co_entry(&data);
3219 } else {
3220 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
3221 qemu_coroutine_enter(co, &data);
3222 while (!data.done) {
3223 qemu_aio_wait();
3226 return data.ret;
3229 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
3230 int nb_sectors, int *pnum)
3232 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
3233 if (ret < 0) {
3234 return ret;
3236 return
3237 (ret & BDRV_BLOCK_DATA) ||
3238 ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
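/*
 * Illustrative sketch (fenced out with #if 0): decoding the flag bits
 * that bdrv_get_block_status() returns for a one-sector query.
 */
#if 0
static void example_query_block_status(BlockDriverState *bs,
                                       int64_t sector_num)
{
    int pnum;
    int64_t ret = bdrv_get_block_status(bs, sector_num, 1, &pnum);

    if (ret < 0) {
        return; /* query failed */
    }
    if (ret & BDRV_BLOCK_ZERO) {
        /* the sector reads as zeroes */
    } else if (ret & BDRV_BLOCK_DATA) {
        /* the sector is allocated in this layer */
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        int64_t host_sector = ret >> BDRV_SECTOR_BITS;
        (void)host_sector; /* where the data lives in bs->file */
    }
}
#endif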
3242 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
3244 * Return true if the given sector is allocated in any image between
3245 * BASE and TOP (inclusive). BASE can be NULL to check if the given
3246 * sector is allocated in any image of the chain. Return false otherwise.
3248 * 'pnum' is set to the number of sectors (including and immediately following
3249 * the specified sector) that are known to be in the same
3250 * allocated/unallocated state.
3253 int bdrv_is_allocated_above(BlockDriverState *top,
3254 BlockDriverState *base,
3255 int64_t sector_num,
3256 int nb_sectors, int *pnum)
3258 BlockDriverState *intermediate;
3259 int ret, n = nb_sectors;
3261 intermediate = top;
3262 while (intermediate && intermediate != base) {
3263 int pnum_inter;
3264 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
3265 &pnum_inter);
3266 if (ret < 0) {
3267 return ret;
3268 } else if (ret) {
3269 *pnum = pnum_inter;
3270 return 1;
3274 * [sector_num, nb_sectors] is unallocated on top but intermediate
3275 * might have
3277 * [sector_num+x, nb_sectors] allocated.
3279 if (n > pnum_inter &&
3280 (intermediate == top ||
3281 sector_num + pnum_inter < intermediate->total_sectors)) {
3282 n = pnum_inter;
3285 intermediate = intermediate->backing_hd;
3288 *pnum = n;
3289 return 0;
3292 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
3294 if (bs->backing_hd && bs->backing_hd->encrypted)
3295 return bs->backing_file;
3296 else if (bs->encrypted)
3297 return bs->filename;
3298 else
3299 return NULL;
3302 void bdrv_get_backing_filename(BlockDriverState *bs,
3303 char *filename, int filename_size)
3305 pstrcpy(filename, filename_size, bs->backing_file);
3308 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
3309 const uint8_t *buf, int nb_sectors)
3311 BlockDriver *drv = bs->drv;
3312 if (!drv)
3313 return -ENOMEDIUM;
3314 if (!drv->bdrv_write_compressed)
3315 return -ENOTSUP;
3316 if (bdrv_check_request(bs, sector_num, nb_sectors))
3317 return -EIO;
3319 assert(!bs->dirty_bitmap);
3321 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
3324 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
3326 BlockDriver *drv = bs->drv;
3327 if (!drv)
3328 return -ENOMEDIUM;
3329 if (!drv->bdrv_get_info)
3330 return -ENOTSUP;
3331 memset(bdi, 0, sizeof(*bdi));
3332 return drv->bdrv_get_info(bs, bdi);
3335 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
3337 BlockDriver *drv = bs->drv;
3338 if (drv && drv->bdrv_get_specific_info) {
3339 return drv->bdrv_get_specific_info(bs);
3341 return NULL;
3344 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
3345 int64_t pos, int size)
3347 QEMUIOVector qiov;
3348 struct iovec iov = {
3349 .iov_base = (void *) buf,
3350 .iov_len = size,
3353 qemu_iovec_init_external(&qiov, &iov, 1);
3354 return bdrv_writev_vmstate(bs, &qiov, pos);
3357 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
3359 BlockDriver *drv = bs->drv;
3361 if (!drv) {
3362 return -ENOMEDIUM;
3363 } else if (drv->bdrv_save_vmstate) {
3364 return drv->bdrv_save_vmstate(bs, qiov, pos);
3365 } else if (bs->file) {
3366 return bdrv_writev_vmstate(bs->file, qiov, pos);
3369 return -ENOTSUP;
3372 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
3373 int64_t pos, int size)
3375 BlockDriver *drv = bs->drv;
3376 if (!drv)
3377 return -ENOMEDIUM;
3378 if (drv->bdrv_load_vmstate)
3379 return drv->bdrv_load_vmstate(bs, buf, pos, size);
3380 if (bs->file)
3381 return bdrv_load_vmstate(bs->file, buf, pos, size);
3382 return -ENOTSUP;
3385 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
3387 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
3388 return;
3391 bs->drv->bdrv_debug_event(bs, event);
3394 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
3395 const char *tag)
3397 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
3398 bs = bs->file;
3401 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
3402 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
3405 return -ENOTSUP;
3408 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
3410 while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
3411 bs = bs->file;
3414 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
3415 return bs->drv->bdrv_debug_resume(bs, tag);
3418 return -ENOTSUP;
3421 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
3423 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
3424 bs = bs->file;
3427 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
3428 return bs->drv->bdrv_debug_is_suspended(bs, tag);
3431 return false;
3434 int bdrv_is_snapshot(BlockDriverState *bs)
3436 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
3439 /* backing_file can be relative, absolute, or a protocol. If it is
3440 * relative, it must be relative to the chain. So, passing in bs->filename
3441 * from a BDS as backing_file should not be done, as that may be relative to
3442 * the CWD rather than the chain. */
3443 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
3444 const char *backing_file)
3446 char *filename_full = NULL;
3447 char *backing_file_full = NULL;
3448 char *filename_tmp = NULL;
3449 int is_protocol = 0;
3450 BlockDriverState *curr_bs = NULL;
3451 BlockDriverState *retval = NULL;
3453 if (!bs || !bs->drv || !backing_file) {
3454 return NULL;
3457 filename_full = g_malloc(PATH_MAX);
3458 backing_file_full = g_malloc(PATH_MAX);
3459 filename_tmp = g_malloc(PATH_MAX);
3461 is_protocol = path_has_protocol(backing_file);
3463 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
3465 /* If either of the filename paths is actually a protocol, then
3466 * compare unmodified paths; otherwise make paths relative */
3467 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
3468 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
3469 retval = curr_bs->backing_hd;
3470 break;
3472 } else {
3473 /* If not an absolute filename path, make it relative to the current
3474 * image's filename path */
3475 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
3476 backing_file);
3478 /* We are going to compare absolute pathnames */
3479 if (!realpath(filename_tmp, filename_full)) {
3480 continue;
3483 /* We need to make sure the backing filename we are comparing against
3484 * is relative to the current image filename (or absolute) */
3485 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
3486 curr_bs->backing_file);
3488 if (!realpath(filename_tmp, backing_file_full)) {
3489 continue;
3492 if (strcmp(backing_file_full, filename_full) == 0) {
3493 retval = curr_bs->backing_hd;
3494 break;
3499 g_free(filename_full);
3500 g_free(backing_file_full);
3501 g_free(filename_tmp);
3502 return retval;
3505 int bdrv_get_backing_file_depth(BlockDriverState *bs)
3507 if (!bs->drv) {
3508 return 0;
3511 if (!bs->backing_hd) {
3512 return 0;
3515 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
3518 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
3520 BlockDriverState *curr_bs = NULL;
3522 if (!bs) {
3523 return NULL;
3526 curr_bs = bs;
3528 while (curr_bs->backing_hd) {
3529 curr_bs = curr_bs->backing_hd;
3531 return curr_bs;
3534 /**************************************************************/
3535 /* async I/Os */
3537 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
3538 QEMUIOVector *qiov, int nb_sectors,
3539 BlockDriverCompletionFunc *cb, void *opaque)
3541 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
3543 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
3544 cb, opaque, false);
3547 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
3548 QEMUIOVector *qiov, int nb_sectors,
3549 BlockDriverCompletionFunc *cb, void *opaque)
3551 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
3553 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
3554 cb, opaque, true);
3558 typedef struct MultiwriteCB {
3559 int error;
3560 int num_requests;
3561 int num_callbacks;
3562 struct {
3563 BlockDriverCompletionFunc *cb;
3564 void *opaque;
3565 QEMUIOVector *free_qiov;
3566 } callbacks[];
3567 } MultiwriteCB;
3569 static void multiwrite_user_cb(MultiwriteCB *mcb)
3571 int i;
3573 for (i = 0; i < mcb->num_callbacks; i++) {
3574 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
3575 if (mcb->callbacks[i].free_qiov) {
3576 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
3578 g_free(mcb->callbacks[i].free_qiov);
3582 static void multiwrite_cb(void *opaque, int ret)
3584 MultiwriteCB *mcb = opaque;
3586 trace_multiwrite_cb(mcb, ret);
3588 if (ret < 0 && !mcb->error) {
3589 mcb->error = ret;
3592 mcb->num_requests--;
3593 if (mcb->num_requests == 0) {
3594 multiwrite_user_cb(mcb);
3595 g_free(mcb);
3599 static int multiwrite_req_compare(const void *a, const void *b)
3601 const BlockRequest *req1 = a, *req2 = b;
3604 * Note that we can't simply subtract req2->sector from req1->sector
3605 * here as that could overflow the return value.
3607 if (req1->sector > req2->sector) {
3608 return 1;
3609 } else if (req1->sector < req2->sector) {
3610 return -1;
3611 } else {
3612 return 0;
3617 * Takes a bunch of requests and tries to merge them. Returns the number of
3618 * requests that remain after merging.
3620 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
3621 int num_reqs, MultiwriteCB *mcb)
3623 int i, outidx;
3625 // Sort requests by start sector
3626 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
3628 // Check if adjacent requests touch the same clusters. If so, combine them,
3629 // filling up gaps with zero sectors.
3630 outidx = 0;
3631 for (i = 1; i < num_reqs; i++) {
3632 int merge = 0;
3633 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
3635 // Handle exactly sequential writes and overlapping writes.
3636 if (reqs[i].sector <= oldreq_last) {
3637 merge = 1;
3640 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
3641 merge = 0;
3644 if (merge) {
3645 size_t size;
3646 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
3647 qemu_iovec_init(qiov,
3648 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
3650 // Add the first request to the merged one. If the requests are
3651 // overlapping, drop the last sectors of the first request.
3652 size = (reqs[i].sector - reqs[outidx].sector) << 9;
3653 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
3655 // We shouldn't need to add any zeros between the two requests
3656 assert(reqs[i].sector <= oldreq_last);
3658 // Add the second request
3659 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
3661 reqs[outidx].nb_sectors = qiov->size >> 9;
3662 reqs[outidx].qiov = qiov;
3664 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
3665 } else {
3666 outidx++;
3667 reqs[outidx].sector = reqs[i].sector;
3668 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
3669 reqs[outidx].qiov = reqs[i].qiov;
3673 return outidx + 1;
3677 * Submit multiple AIO write requests at once.
3679 * On success, the function returns 0 and all requests in the reqs array have
3680 * been submitted. In the error case, this function returns -1, and any of the
3681 * requests may or may not be submitted yet. In particular, this means that the
3682 * callback will be called for some of the requests, for others it won't. The
3683 * caller must check the error field of the BlockRequest to wait for the right
3684 * callbacks (if error != 0, no callback will be called).
3686 * The implementation may modify the contents of the reqs array, e.g. to merge
3687 * requests. However, the fields opaque and error are left unmodified as they
3688 * are used to signal failure for a single request to the caller.
3690 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
3692 MultiwriteCB *mcb;
3693 int i;
3695 /* don't submit writes if we don't have a medium */
3696 if (bs->drv == NULL) {
3697 for (i = 0; i < num_reqs; i++) {
3698 reqs[i].error = -ENOMEDIUM;
3700 return -1;
3703 if (num_reqs == 0) {
3704 return 0;
3707 // Create MultiwriteCB structure
3708 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
3709 mcb->num_requests = 0;
3710 mcb->num_callbacks = num_reqs;
3712 for (i = 0; i < num_reqs; i++) {
3713 mcb->callbacks[i].cb = reqs[i].cb;
3714 mcb->callbacks[i].opaque = reqs[i].opaque;
3717 // Check for mergeable requests
3718 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
3720 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
3722 /* Run the aio requests. */
3723 mcb->num_requests = num_reqs;
3724 for (i = 0; i < num_reqs; i++) {
3725 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
3726 reqs[i].nb_sectors, multiwrite_cb, mcb);
3729 return 0;
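/*
 * Illustrative sketch (fenced out with #if 0): submitting two adjacent
 * writes through bdrv_aio_multiwrite(); multiwrite_merge() may combine
 * them before submission. Callback and iovecs are hypothetical.
 */
#if 0
static void example_multiwrite_cb(void *opaque, int ret)
{
    /* invoked once per original request with the merged result */
}

static int example_multiwrite(BlockDriverState *bs, QEMUIOVector *qiov0,
                              QEMUIOVector *qiov1)
{
    BlockRequest reqs[2] = {
        { .sector = 0, .nb_sectors = 8, .qiov = qiov0,
          .cb = example_multiwrite_cb, .opaque = NULL },
        { .sector = 8, .nb_sectors = 8, .qiov = qiov1,
          .cb = example_multiwrite_cb, .opaque = NULL },
    };

    return bdrv_aio_multiwrite(bs, reqs, 2);
}
#endif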
3732 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
3734 acb->aiocb_info->cancel(acb);
3737 /**************************************************************/
3738 /* async block device emulation */
3740 typedef struct BlockDriverAIOCBSync {
3741 BlockDriverAIOCB common;
3742 QEMUBH *bh;
3743 int ret;
3744 /* vector translation state */
3745 QEMUIOVector *qiov;
3746 uint8_t *bounce;
3747 int is_write;
3748 } BlockDriverAIOCBSync;
3750 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
3752 BlockDriverAIOCBSync *acb =
3753 container_of(blockacb, BlockDriverAIOCBSync, common);
3754 qemu_bh_delete(acb->bh);
3755 acb->bh = NULL;
3756 qemu_aio_release(acb);
3759 static const AIOCBInfo bdrv_em_aiocb_info = {
3760 .aiocb_size = sizeof(BlockDriverAIOCBSync),
3761 .cancel = bdrv_aio_cancel_em,
3764 static void bdrv_aio_bh_cb(void *opaque)
3766 BlockDriverAIOCBSync *acb = opaque;
3768 if (!acb->is_write)
3769 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
3770 qemu_vfree(acb->bounce);
3771 acb->common.cb(acb->common.opaque, acb->ret);
3772 qemu_bh_delete(acb->bh);
3773 acb->bh = NULL;
3774 qemu_aio_release(acb);
3777 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
3778 int64_t sector_num,
3779 QEMUIOVector *qiov,
3780 int nb_sectors,
3781 BlockDriverCompletionFunc *cb,
3782 void *opaque,
3783 int is_write)
3786 BlockDriverAIOCBSync *acb;
3788 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
3789 acb->is_write = is_write;
3790 acb->qiov = qiov;
3791 acb->bounce = qemu_blockalign(bs, qiov->size);
3792 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
3794 if (is_write) {
3795 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
3796 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
3797 } else {
3798 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
3801 qemu_bh_schedule(acb->bh);
3803 return &acb->common;
3806 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
3807 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3808 BlockDriverCompletionFunc *cb, void *opaque)
3810 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
3813 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
3814 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3815 BlockDriverCompletionFunc *cb, void *opaque)
3817 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
3821 typedef struct BlockDriverAIOCBCoroutine {
3822 BlockDriverAIOCB common;
3823 BlockRequest req;
3824 bool is_write;
3825 bool *done;
3826 QEMUBH* bh;
3827 } BlockDriverAIOCBCoroutine;
3829 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
3831 BlockDriverAIOCBCoroutine *acb =
3832 container_of(blockacb, BlockDriverAIOCBCoroutine, common);
3833 bool done = false;
3835 acb->done = &done;
3836 while (!done) {
3837 qemu_aio_wait();
3841 static const AIOCBInfo bdrv_em_co_aiocb_info = {
3842 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
3843 .cancel = bdrv_aio_co_cancel_em,
3846 static void bdrv_co_em_bh(void *opaque)
3848 BlockDriverAIOCBCoroutine *acb = opaque;
3850 acb->common.cb(acb->common.opaque, acb->req.error);
3852 if (acb->done) {
3853 *acb->done = true;
3856 qemu_bh_delete(acb->bh);
3857 qemu_aio_release(acb);
3860 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3861 static void coroutine_fn bdrv_co_do_rw(void *opaque)
3863 BlockDriverAIOCBCoroutine *acb = opaque;
3864 BlockDriverState *bs = acb->common.bs;
3866 if (!acb->is_write) {
3867 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
3868 acb->req.nb_sectors, acb->req.qiov, 0);
3869 } else {
3870 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
3871 acb->req.nb_sectors, acb->req.qiov, 0);
3874 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3875 qemu_bh_schedule(acb->bh);
3878 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
3879 int64_t sector_num,
3880 QEMUIOVector *qiov,
3881 int nb_sectors,
3882 BlockDriverCompletionFunc *cb,
3883 void *opaque,
3884 bool is_write)
3886 Coroutine *co;
3887 BlockDriverAIOCBCoroutine *acb;
3889 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
3890 acb->req.sector = sector_num;
3891 acb->req.nb_sectors = nb_sectors;
3892 acb->req.qiov = qiov;
3893 acb->is_write = is_write;
3894 acb->done = NULL;
3896 co = qemu_coroutine_create(bdrv_co_do_rw);
3897 qemu_coroutine_enter(co, acb);
3899 return &acb->common;
3902 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
3904 BlockDriverAIOCBCoroutine *acb = opaque;
3905 BlockDriverState *bs = acb->common.bs;
3907 acb->req.error = bdrv_co_flush(bs);
3908 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3909 qemu_bh_schedule(acb->bh);
3912 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
3913 BlockDriverCompletionFunc *cb, void *opaque)
3915 trace_bdrv_aio_flush(bs, opaque);
3917 Coroutine *co;
3918 BlockDriverAIOCBCoroutine *acb;
3920 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
3921 acb->done = NULL;
3923 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
3924 qemu_coroutine_enter(co, acb);
3926 return &acb->common;
3929 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
3931 BlockDriverAIOCBCoroutine *acb = opaque;
3932 BlockDriverState *bs = acb->common.bs;
3934 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
3935 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3936 qemu_bh_schedule(acb->bh);
3939 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
3940 int64_t sector_num, int nb_sectors,
3941 BlockDriverCompletionFunc *cb, void *opaque)
3943 Coroutine *co;
3944 BlockDriverAIOCBCoroutine *acb;
3946 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3948 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
3949 acb->req.sector = sector_num;
3950 acb->req.nb_sectors = nb_sectors;
3951 acb->done = NULL;
3952 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3953 qemu_coroutine_enter(co, acb);
3955 return &acb->common;
3958 void bdrv_init(void)
3960 module_call_init(MODULE_INIT_BLOCK);
3963 void bdrv_init_with_whitelist(void)
3965 use_bdrv_whitelist = 1;
3966 bdrv_init();
3969 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
3970 BlockDriverCompletionFunc *cb, void *opaque)
3972 BlockDriverAIOCB *acb;
3974 acb = g_slice_alloc(aiocb_info->aiocb_size);
3975 acb->aiocb_info = aiocb_info;
3976 acb->bs = bs;
3977 acb->cb = cb;
3978 acb->opaque = opaque;
3979 return acb;
3982 void qemu_aio_release(void *p)
3984 BlockDriverAIOCB *acb = p;
3985 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
3988 /**************************************************************/
3989 /* Coroutine block device emulation */
3991 typedef struct CoroutineIOCompletion {
3992 Coroutine *coroutine;
3993 int ret;
3994 } CoroutineIOCompletion;
3996 static void bdrv_co_io_em_complete(void *opaque, int ret)
3998 CoroutineIOCompletion *co = opaque;
4000 co->ret = ret;
4001 qemu_coroutine_enter(co->coroutine, NULL);
4004 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4005 int nb_sectors, QEMUIOVector *iov,
4006 bool is_write)
4008 CoroutineIOCompletion co = {
4009 .coroutine = qemu_coroutine_self(),
4011 BlockDriverAIOCB *acb;
4013 if (is_write) {
4014 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4015 bdrv_co_io_em_complete, &co);
4016 } else {
4017 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4018 bdrv_co_io_em_complete, &co);
4021 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
4022 if (!acb) {
4023 return -EIO;
4025 qemu_coroutine_yield();
4027 return co.ret;
4030 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4031 int64_t sector_num, int nb_sectors,
4032 QEMUIOVector *iov)
4034 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4037 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4038 int64_t sector_num, int nb_sectors,
4039 QEMUIOVector *iov)
4041 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4044 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
4046 RwCo *rwco = opaque;
4048 rwco->ret = bdrv_co_flush(rwco->bs);
4051 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4053 int ret;
4055 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
4056 return 0;
4059 /* Write back cached data to the OS even with cache=unsafe */
4060 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
4061 if (bs->drv->bdrv_co_flush_to_os) {
4062 ret = bs->drv->bdrv_co_flush_to_os(bs);
4063 if (ret < 0) {
4064 return ret;
4068 /* But don't actually force it to the disk with cache=unsafe */
4069 if (bs->open_flags & BDRV_O_NO_FLUSH) {
4070 goto flush_parent;
4073 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
4074 if (bs->drv->bdrv_co_flush_to_disk) {
4075 ret = bs->drv->bdrv_co_flush_to_disk(bs);
4076 } else if (bs->drv->bdrv_aio_flush) {
4077 BlockDriverAIOCB *acb;
4078 CoroutineIOCompletion co = {
4079 .coroutine = qemu_coroutine_self(),
4082 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4083 if (acb == NULL) {
4084 ret = -EIO;
4085 } else {
4086 qemu_coroutine_yield();
4087 ret = co.ret;
4089 } else {
4091 * Some block drivers always operate in either writethrough or unsafe
4092 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4093 * know how the server works (because the behaviour is hardcoded or
4094 * depends on server-side configuration), so we can't ensure that
4095 * everything is safe on disk. Returning an error doesn't work because
4096 * that would break guests even if the server operates in writethrough
4097 * mode.
4099 * Let's hope the user knows what they're doing.
4101 ret = 0;
4103 if (ret < 0) {
4104 return ret;
4107 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
4108 * set in the case of cache=unsafe, so there are no useless flushes.
4110 flush_parent:
4111 return bdrv_co_flush(bs->file);
4114 void bdrv_invalidate_cache(BlockDriverState *bs)
4116 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
4117 bs->drv->bdrv_invalidate_cache(bs);
4121 void bdrv_invalidate_cache_all(void)
4123 BlockDriverState *bs;
4125 QTAILQ_FOREACH(bs, &bdrv_states, list) {
4126 bdrv_invalidate_cache(bs);
4130 void bdrv_clear_incoming_migration_all(void)
4132 BlockDriverState *bs;
4134 QTAILQ_FOREACH(bs, &bdrv_states, list) {
4135 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
4139 int bdrv_flush(BlockDriverState *bs)
4141 Coroutine *co;
4142 RwCo rwco = {
4143 .bs = bs,
4144 .ret = NOT_DONE,
4147 if (qemu_in_coroutine()) {
4148 /* Fast-path if already in coroutine context */
4149 bdrv_flush_co_entry(&rwco);
4150 } else {
4151 co = qemu_coroutine_create(bdrv_flush_co_entry);
4152 qemu_coroutine_enter(co, &rwco);
4153 while (rwco.ret == NOT_DONE) {
4154 qemu_aio_wait();
4158 return rwco.ret;
4161 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
4163 RwCo *rwco = opaque;
4165 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
4168 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
4169 int nb_sectors)
4171 if (!bs->drv) {
4172 return -ENOMEDIUM;
4173 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
4174 return -EIO;
4175 } else if (bs->read_only) {
4176 return -EROFS;
4179 if (bs->dirty_bitmap) {
4180 bdrv_reset_dirty(bs, sector_num, nb_sectors);
4183 /* Do nothing if disabled. */
4184 if (!(bs->open_flags & BDRV_O_UNMAP)) {
4185 return 0;
4188 if (bs->drv->bdrv_co_discard) {
4189 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
4190 } else if (bs->drv->bdrv_aio_discard) {
4191 BlockDriverAIOCB *acb;
4192 CoroutineIOCompletion co = {
4193 .coroutine = qemu_coroutine_self(),
4196 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
4197 bdrv_co_io_em_complete, &co);
4198 if (acb == NULL) {
4199 return -EIO;
4200 } else {
4201 qemu_coroutine_yield();
4202 return co.ret;
4204 } else {
4205 return 0;
4209 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
4211 Coroutine *co;
4212 RwCo rwco = {
4213 .bs = bs,
4214 .sector_num = sector_num,
4215 .nb_sectors = nb_sectors,
4216 .ret = NOT_DONE,
4219 if (qemu_in_coroutine()) {
4220 /* Fast-path if already in coroutine context */
4221 bdrv_discard_co_entry(&rwco);
4222 } else {
4223 co = qemu_coroutine_create(bdrv_discard_co_entry);
4224 qemu_coroutine_enter(co, &rwco);
4225 while (rwco.ret == NOT_DONE) {
4226 qemu_aio_wait();
4230 return rwco.ret;
4233 /**************************************************************/
4234 /* removable device support */
4237 * Return TRUE if the media is present
4239 int bdrv_is_inserted(BlockDriverState *bs)
4241 BlockDriver *drv = bs->drv;
4243 if (!drv)
4244 return 0;
4245 if (!drv->bdrv_is_inserted)
4246 return 1;
4247 return drv->bdrv_is_inserted(bs);
4251 * Return whether the media changed since the last call to this
4252 * function, or -ENOTSUP if we don't know. Most drivers don't know.
4254 int bdrv_media_changed(BlockDriverState *bs)
4256 BlockDriver *drv = bs->drv;
4258 if (drv && drv->bdrv_media_changed) {
4259 return drv->bdrv_media_changed(bs);
4261 return -ENOTSUP;
4265 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
4267 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
4269 BlockDriver *drv = bs->drv;
4271 if (drv && drv->bdrv_eject) {
4272 drv->bdrv_eject(bs, eject_flag);
4275 if (bs->device_name[0] != '\0') {
4276 bdrv_emit_qmp_eject_event(bs, eject_flag);
4281 * Lock or unlock the media (if it is locked, the user won't be able
4282 * to eject it manually).
4284 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
4286 BlockDriver *drv = bs->drv;
4288 trace_bdrv_lock_medium(bs, locked);
4290 if (drv && drv->bdrv_lock_medium) {
4291 drv->bdrv_lock_medium(bs, locked);
4295 /* needed for generic scsi interface */
4297 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
4299 BlockDriver *drv = bs->drv;
4301 if (drv && drv->bdrv_ioctl)
4302 return drv->bdrv_ioctl(bs, req, buf);
4303 return -ENOTSUP;
4306 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
4307 unsigned long int req, void *buf,
4308 BlockDriverCompletionFunc *cb, void *opaque)
4310 BlockDriver *drv = bs->drv;
4312 if (drv && drv->bdrv_aio_ioctl)
4313 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
4314 return NULL;
4317 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
4319 bs->buffer_alignment = align;
4322 void *qemu_blockalign(BlockDriverState *bs, size_t size)
4324 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
4328 * Check if all memory in this vector is sector aligned.
4330 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
4332 int i;
4334 for (i = 0; i < qiov->niov; i++) {
4335 if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
4336 return false;
4340 return true;
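/*
 * Illustrative sketch (fenced out with #if 0): building a QEMUIOVector
 * over a qemu_blockalign() buffer so that it passes
 * bdrv_qiov_is_aligned(). The caller must qemu_vfree() the buffer.
 */
#if 0
static void example_aligned_qiov(BlockDriverState *bs, size_t len,
                                 QEMUIOVector *qiov, struct iovec *iov)
{
    iov->iov_base = qemu_blockalign(bs, len);
    iov->iov_len = len;
    qemu_iovec_init_external(qiov, iov, 1);
    assert(bdrv_qiov_is_aligned(bs, qiov));
}
#endif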
4343 void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity)
4345 int64_t bitmap_size;
4347 assert((granularity & (granularity - 1)) == 0);
4349 if (granularity) {
4350 granularity >>= BDRV_SECTOR_BITS;
4351 assert(!bs->dirty_bitmap);
4352 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
4353 bs->dirty_bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
4354 } else {
4355 if (bs->dirty_bitmap) {
4356 hbitmap_free(bs->dirty_bitmap);
4357 bs->dirty_bitmap = NULL;
4362 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
4364 if (bs->dirty_bitmap) {
4365 return hbitmap_get(bs->dirty_bitmap, sector);
4366 } else {
4367 return 0;
4371 void bdrv_dirty_iter_init(BlockDriverState *bs, HBitmapIter *hbi)
4373 hbitmap_iter_init(hbi, bs->dirty_bitmap, 0);
4376 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
4377 int nr_sectors)
4379 hbitmap_set(bs->dirty_bitmap, cur_sector, nr_sectors);
4382 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
4383 int nr_sectors)
4385 hbitmap_reset(bs->dirty_bitmap, cur_sector, nr_sectors);
4388 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
4390 if (bs->dirty_bitmap) {
4391 return hbitmap_count(bs->dirty_bitmap);
4392 } else {
4393 return 0;
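/*
 * Illustrative sketch (fenced out with #if 0): enabling dirty tracking
 * with a hypothetical 64 KiB granularity and walking the dirty sectors
 * with an HBitmapIter once writes have been marked.
 */
#if 0
static void example_walk_dirty_sectors(BlockDriverState *bs)
{
    HBitmapIter hbi;
    int64_t sector;

    bdrv_set_dirty_tracking(bs, 65536); /* granularity in bytes */
    /* ... writes happen; bdrv_set_dirty() marks their sectors ... */
    bdrv_dirty_iter_init(bs, &hbi);
    while ((sector = hbitmap_iter_next(&hbi)) >= 0) {
        /* 'sector' is dirty; a mirror job would copy it here */
    }
    bdrv_set_dirty_tracking(bs, 0); /* drop the bitmap */
}
#endif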
4397 /* Get a reference to bs */
4398 void bdrv_ref(BlockDriverState *bs)
4400 bs->refcnt++;
4403 /* Release a previously grabbed reference to bs.
4404 * If, after releasing, the reference count is zero, the BlockDriverState is
4405 * deleted. */
4406 void bdrv_unref(BlockDriverState *bs)
4408 assert(bs->refcnt > 0);
4409 if (--bs->refcnt == 0) {
4410 bdrv_delete(bs);

void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
            (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
             bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
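
/*
 * Illustrative sketch (not part of the original file): the error path of a
 * device model.  When a request fails and the configured error policy stops
 * the VM, the device records the failure in the iostatus so that
 * "info block" / query-block can report it.  The surrounding logic is
 * hypothetical and heavily simplified.
 */
static void example_handle_write_error(BlockDriverState *bs, int error)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        /* records NOSPACE for ENOSPC, FAILED for everything else */
        bdrv_iostatus_set_err(bs, error);
    }
}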

void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
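
/*
 * Illustrative sketch (not part of the original file): device emulation
 * brackets each guest request with bdrv_acct_start()/bdrv_acct_done() so
 * that the per-type byte, op and latency counters reported by
 * query-blockstats stay consistent.  The function name is hypothetical.
 */
static int example_accounted_read(BlockDriverState *bs, int64_t sector_num,
                                  uint8_t *buf, int nb_sectors)
{
    BlockAcctCookie cookie;
    int ret;

    /* record size and start timestamp before issuing the request */
    bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
                    BDRV_ACCT_READ);
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    /* fold the finished request into bs->nr_bytes/nr_ops/total_time_ns */
    bdrv_acct_done(bs, &cookie);
    return ret;
}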

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    /* The size for the image must always be specified, with one exception:
     * If we are using a backing file, we can obtain the size from there */
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= BDRV_SECTOR_SIZE;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                         "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_unref(bs);
    }
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
}
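
/*
 * Illustrative sketch (not part of the original file): creating a qcow2
 * overlay backed by another file, the way qemu-img and the QMP layer drive
 * bdrv_img_create().  The paths are made up for the example.
 */
static void example_create_overlay(Error **errp)
{
    /* an img_size of -1 lets the size be inherited from the backing file */
    bdrv_img_create("/tmp/overlay.qcow2", "qcow2",
                    "/tmp/base.img", "raw",
                    NULL, (uint64_t)-1, 0, errp, true);
}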

AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
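
/*
 * Illustrative sketch (not part of the original file): the backup job uses
 * this hook to copy out old data before a guest write lands
 * (copy-before-write).  The notifier is invoked with the tracked request of
 * the write about to happen; the handler below is hypothetical.
 */
static int example_before_write(NotifierWithReturn *notifier, void *opaque)
{
    /* returning a negative errno here fails the guest write */
    return 0;
}

static void example_install_notifier(BlockDriverState *bs,
                                     NotifierWithReturn *notifier)
{
    notifier->notify = example_before_write;
    bdrv_add_before_write_notifier(bs, notifier);
}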

int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}
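
/*
 * Illustrative sketch (not part of the original file): how "qemu-img amend"
 * style code builds the option list against the driver's create_options
 * before handing it to bdrv_amend_options().  The option string is made up
 * for the example.
 */
static int example_amend_compat(BlockDriverState *bs)
{
    QEMUOptionParameter *param;
    int ret;

    /* parse "compat=1.1" against the options the driver understands */
    param = parse_option_parameters("compat=1.1", bs->drv->create_options,
                                    NULL);
    if (param == NULL) {
        return -EINVAL;
    }
    ret = bdrv_amend_options(bs, param);
    free_option_parameters(param);
    return ret;
}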

ExtSnapshotPerm bdrv_check_ext_snapshot(BlockDriverState *bs)
{
    if (bs->drv->bdrv_check_ext_snapshot) {
        return bs->drv->bdrv_check_ext_snapshot(bs);
    }

    if (bs->file && bs->file->drv && bs->file->drv->bdrv_check_ext_snapshot) {
        return bs->file->drv->bdrv_check_ext_snapshot(bs);
    }

    /* external snapshots are allowed by default */
    return EXT_SNAPSHOT_ALLOWED;
}

ExtSnapshotPerm bdrv_check_ext_snapshot_forbidden(BlockDriverState *bs)
{
    return EXT_SNAPSHOT_FORBIDDEN;
}
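
/*
 * Illustrative sketch (not part of the original file): a driver that must
 * not have an external snapshot taken on top of it can plug the
 * always-forbidding helper above into its BlockDriver definition.  The
 * driver shown is hypothetical.
 */
static BlockDriver example_filter_driver = {
    .format_name             = "example-filter",
    .bdrv_check_ext_snapshot = bdrv_check_ext_snapshot_forbidden,
};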