/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor.h"
#include "block_int.h"
#include "blockjob.h"
#include "module.h"
#include "qjson.h"
#include "qemu-coroutine.h"
#include "qmp-commands.h"
#include "qemu-timer.h"
#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    bs->slice_start = 0;
    bs->slice_end   = 0;
    bs->slice_time  = 0;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
}
static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}

void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
    bs->slice_start = qemu_get_clock_ns(vm_clock);
    bs->slice_end   = bs->slice_start + bs->slice_time;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
    bs->io_limits_enabled = true;
}
bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
         || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
         || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
         || io_limits->iops[BLOCK_IO_LIMIT_READ]
         || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
         || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* Requests must keep their FIFO ordering: the next throttled request is
     * not dequeued until the current one is allowed to be serviced.  So if
     * the current request still exceeds the limits, it is re-inserted at the
     * head of the queue, and every request behind it stays in the
     * throttled_reqs queue.
     */
    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
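/* A quick illustration of path_combine() (hypothetical values, for
 * documentation only):
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest),
 *                  "/img/base.qcow2", "backing.qcow2");
 *     // dest == "/img/backing.qcow2"
 *     path_combine(dest, sizeof(dest),
 *                  "/img/base.qcow2", "/abs/backing.qcow2");
 *     // an absolute filename is copied through unchanged
 */
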
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    return bs;
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0])
        return 1;               /* no whitelist, anything goes */

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}
BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;
    assert(cco->drv);

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}

int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        /* free the duplicated filename; the early return must not leak it */
        g_free(cco.filename);
        return -ENOTSUP;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    g_free(cco.filename);

    return ret;
}
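/* Example (hypothetical caller, for illustration only): create a 1 GiB
 * qcow2 image synchronously, using the option helpers seen elsewhere in
 * this file.
 *
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1024 * 1024 * 1024);
 *     int ret = bdrv_create(drv, "test.qcow2", opts);
 *     free_option_parameters(opts);
 */
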
int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename)) {
        return bdrv_find_format("file");
    }
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
static int find_image_format(const char *filename, BlockDriver **pdrv)
{
    int ret, score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    BlockDriverState *bs;

    ret = bdrv_file_open(&bs, filename, 0);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs)) {
        bdrv_delete(bs);
        drv = bdrv_find_format("raw");
        if (!drv) {
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    bdrv_delete(bs);
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
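/* For reference, the flag combinations produced above:
 *
 *     mode          BDRV_O_NOCACHE  BDRV_O_CACHE_WB  BDRV_O_NO_FLUSH
 *     off/none            x               x
 *     directsync          x
 *     writeback                           x
 *     unsafe                              x                x
 *     writethrough   (none set - the default)
 */
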
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/*
 * Common part for opening disk images and files
 */
static int bdrv_open_common(BlockDriverState *bs, const char *filename,
    int flags, BlockDriver *drv)
{
    int ret, open_flags;

    assert(drv != NULL);
    assert(bs->file == NULL);

    trace_bdrv_open_common(bs, filename, flags, drv->format_name);

    bs->open_flags = flags;
    bs->buffer_alignment = 512;

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    pstrcpy(bs->filename, sizeof(bs->filename), filename);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
        return -ENOTSUP;
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
    open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    bs->read_only = !(open_flags & BDRV_O_RDWR);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        ret = drv->bdrv_file_open(bs, filename, open_flags);
    } else {
        ret = bdrv_file_open(&bs->file, filename, open_flags);
        if (ret >= 0) {
            ret = drv->bdrv_open(bs, open_flags);
        }
    }

    if (ret < 0) {
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        unlink(filename);
    }
#endif
    return 0;

free_and_fail:
    if (bs->file) {
        bdrv_delete(bs->file);
        bs->file = NULL;
    }
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    int ret;

    drv = bdrv_find_protocol(filename);
    if (!drv) {
        return -ENOENT;
    }

    bs = bdrv_new("");
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }
    bs->growable = 1;
    *pbs = bs;
    return 0;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 */
int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
              BlockDriver *drv)
{
    int ret;
    char tmp_filename[PATH_MAX];

    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        int is_protocol = 0;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *options;
        char backing_filename[PATH_MAX];

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, 0, drv);
        if (ret < 0) {
            bdrv_delete(bs1);
            return ret;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        if (bs1->drv && bs1->drv->protocol_name)
            is_protocol = 1;

        bdrv_delete(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            return ret;
        }

        /* Real path is meaningless for protocols */
        if (is_protocol)
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        else if (!realpath(filename, backing_filename))
            return -errno;

        bdrv_qcow2 = bdrv_find_format("qcow2");
        options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);

        set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
        if (drv) {
            set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
                drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
        free_option_parameters(options);
        if (ret < 0) {
            return ret;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Find the right image format driver */
    if (!drv) {
        ret = find_image_format(filename, &drv);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, filename, flags, drv);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
        char backing_filename[PATH_MAX];
        int back_flags;
        BlockDriver *back_drv = NULL;

        bs->backing_hd = bdrv_new("");
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));

        if (bs->backing_format[0] != '\0') {
            back_drv = bdrv_find_format(bs->backing_format);
        }

        /* backing files always opened read-only */
        back_flags =
            flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

        ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
        if (ret < 0) {
            bdrv_close(bs);
            return ret;
        }
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_enable(bs);
    }

    return 0;

unlink_and_fail:
    if (bs->is_temporary) {
        unlink(filename);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;
/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or alternatively may be NULL, in which
 * case a new BlockReopenQueue will be created and initialized.  This newly
 * created BlockReopenQueue should be passed back in for subsequent calls
 * that are intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
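/* Sketch of a multi-device transactional reopen (hypothetical caller,
 * illustration only): queue several devices, then apply atomically.
 *
 *     Error *err = NULL;
 *     BlockReopenQueue *queue = NULL;
 *     queue = bdrv_reopen_queue(queue, bs_a, bdrv_flags_a);
 *     queue = bdrv_reopen_queue(queue, bs_b, bdrv_flags_b);
 *     if (bdrv_reopen_multiple(queue, &err) < 0) {
 *         // every prepared device was rolled back via bdrv_reopen_abort();
 *         // the queue itself has already been freed
 *     }
 */
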
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state.
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_set(errp, QERR_OPEN_FILE_FAILED,
                          reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
}
/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    bdrv_flush(bs);
    if (bs->drv) {
        if (bs->job) {
            block_job_cancel_sync(bs->job);
        }
        bdrv_drain_all();

        if (bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
        if (bs->backing_hd) {
            bdrv_delete(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;

        if (bs->file != NULL) {
            bdrv_delete(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;
    bool busy;

    do {
        busy = qemu_aio_wait();

        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                qemu_co_queue_restart_all(&bs->throttled_reqs);
                busy = true;
            }
        }
    } while (busy);

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        assert(QLIST_EMPTY(&bs->tracked_requests));
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}
/* make a BlockDriverState anonymous by removing it from the bdrv_states list.
   Also, NUL-terminate the device_name to prevent a double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o timing parameters */
    bs_dest->slice_time         = bs_src->slice_time;
    bs_dest->slice_start        = bs_src->slice_start;
    bs_dest->slice_end          = bs_src->slice_end;
    bs_dest->io_limits          = bs_src->io_limits;
    bs_dest->io_base            = bs_src->io_base;
    bs_dest->throttled_reqs     = bs_src->throttled_reqs;
    bs_dest->block_timer        = bs_src->block_timer;
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_count        = bs_src->dirty_count;
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
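/* Sketch of how a caller stacks a freshly-opened overlay onto a live chain
 * (hypothetical, for illustration only; overlay_file and flags are assumed):
 *
 *     BlockDriverState *bs_new = bdrv_new("");           // anonymous
 *     ret = bdrv_open(bs_new, overlay_file, flags, drv); // open the overlay
 *     bdrv_append(bs_new, bs_top);
 *     // the device keeps using the bs_top pointer, which now presents the
 *     // new overlay; bs_new holds the old top image and backs it
 */
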
void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    bdrv_close(bs);

    assert(bs != bs_snapshots);
    g_free(bs);
}
int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
    if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
        bs_snapshots = NULL;
    }
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               BlockErrorAction action, int is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}
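/* The resulting QMP event on the wire looks roughly like this (device name
 * is illustrative):
 *
 *     { "event": "DEVICE_TRAY_MOVED",
 *       "data": { "device": "ide1-cd0", "tray-open": true } }
 */
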
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}
bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf;
    char filename[1024];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* use pstrcpy rather than strncpy so the copy is always NUL-terminated */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {

            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_commit(bs);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    bool is_write;
    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */
};
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
/**
 * Round a region to cluster boundaries
 */
static void round_to_clusters(BlockDriverState *bs,
                              int64_t sector_num, int nb_sectors,
                              int64_t *cluster_sector_num,
                              int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
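/* Worked example (assuming a 64 KiB cluster size, i.e. c = 128 sectors):
 * a request for sectors [100, 200) gives
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(100, 128) = 0
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(100 - 0 + 100, 128) = 256
 * so the rounded region [0, 256) fully covers clusters 0 and 1.
 */
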
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors)
{
    /*        aaaa   bbbb */
    if (sector_num >= req->sector_num + req->nb_sectors) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->sector_num >= sector_num + nb_sectors) {
        return false;
    }
    return true;
}
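/* In other words, two half-open sector ranges overlap iff each starts
 * before the other ends: [8, 16) and [12, 20) overlap, while [8, 16) and
 * [16, 24) do not.
 */
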
static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    round_to_clusters(bs, sector_num, nb_sectors,
                      &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}
typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_delete(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov, 0);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov, 0);
    }
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .qiov = &qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    /* forward the actual request; the original code erroneously read one
     * sector at offset 0 regardless of the arguments */
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
#define BITS_PER_LONG  (sizeof(unsigned long) * 8)

static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int dirty)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / BITS_PER_LONG;
        bit = start % BITS_PER_LONG;
        val = bs->dirty_bitmap[idx];
        if (dirty) {
            if (!(val & (1UL << bit))) {
                bs->dirty_count++;
                val |= 1UL << bit;
            }
        } else {
            if (val & (1UL << bit)) {
                bs->dirty_count--;
                val &= ~(1UL << bit);
            }
        }
        bs->dirty_bitmap[idx] = val;
    }
}
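/* Example of the indexing arithmetic above (assuming
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048 and 64-bit longs):
 * sector_num = 1000000 maps to chunk 488 (1000000 / 2048), which lives in
 * bs->dirty_bitmap[7] (488 / 64) at bit position 40 (488 % 64).
 */
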
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
}
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf, buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }
    return count1;
}
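/* Example of how the alignment split above plays out (BDRV_SECTOR_SIZE is
 * 512): a write of 2000 bytes at offset 300 becomes
 *     1. read-modify-write of sector 0 (bytes 300..511, len 212)
 *     2. in-place write of sectors 1..3 (bytes 512..2047, 1536 bytes)
 *     3. read-modify-write of sector 4 (bytes 2048..2299, the last 252 bytes)
 */
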
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
    const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    round_to_clusters(bs, sector_num, nb_sectors,
                      &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
2217 * Handle a read request in coroutine context
2219 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
2220 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
2221 BdrvRequestFlags flags)
2223 BlockDriver *drv = bs->drv;
2224 BdrvTrackedRequest req;
2225 int ret;
2227 if (!drv) {
2228 return -ENOMEDIUM;
2230 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
2231 return -EIO;
2234 /* throttling disk read I/O */
2235 if (bs->io_limits_enabled) {
2236 bdrv_io_limits_intercept(bs, false, nb_sectors);
2239 if (bs->copy_on_read) {
2240 flags |= BDRV_REQ_COPY_ON_READ;
2242 if (flags & BDRV_REQ_COPY_ON_READ) {
2243 bs->copy_on_read_in_flight++;
2246 if (bs->copy_on_read_in_flight) {
2247 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
2250 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
2252 if (flags & BDRV_REQ_COPY_ON_READ) {
2253 int pnum;
2255 ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
2256 if (ret < 0) {
2257 goto out;
2260 if (!ret || pnum != nb_sectors) {
2261 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
2262 goto out;
2266 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
2268 out:
2269 tracked_request_end(&req);
2271 if (flags & BDRV_REQ_COPY_ON_READ) {
2272 bs->copy_on_read_in_flight--;
2275 return ret;
2278 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
2279 int nb_sectors, QEMUIOVector *qiov)
2281 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
2283 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
2286 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
2287 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2289 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
2291 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
2292 BDRV_REQ_COPY_ON_READ);
2295 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
2296 int64_t sector_num, int nb_sectors)
2298 BlockDriver *drv = bs->drv;
2299 QEMUIOVector qiov;
2300 struct iovec iov;
2301 int ret;
2303 /* TODO Emulate only part of misaligned requests instead of letting block
2304 * drivers return -ENOTSUP and emulate everything */
2306 /* First try the efficient write zeroes operation */
2307 if (drv->bdrv_co_write_zeroes) {
2308 ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
2309 if (ret != -ENOTSUP) {
2310 return ret;
2314 /* Fall back to bounce buffer if write zeroes is unsupported */
2315 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2316 iov.iov_base = qemu_blockalign(bs, iov.iov_len);
2317 memset(iov.iov_base, 0, iov.iov_len);
2318 qemu_iovec_init_external(&qiov, &iov, 1);
2320 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
2322 qemu_vfree(iov.iov_base);
2323 return ret;
2327 * Handle a write request in coroutine context
2329 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
2330 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
2331 BdrvRequestFlags flags)
2333 BlockDriver *drv = bs->drv;
2334 BdrvTrackedRequest req;
2335 int ret;
2337 if (!bs->drv) {
2338 return -ENOMEDIUM;
2340 if (bs->read_only) {
2341 return -EACCES;
2343 if (bdrv_check_request(bs, sector_num, nb_sectors)) {
2344 return -EIO;
2347 /* throttling disk write I/O */
2348 if (bs->io_limits_enabled) {
2349 bdrv_io_limits_intercept(bs, true, nb_sectors);
2352 if (bs->copy_on_read_in_flight) {
2353 wait_for_overlapping_requests(bs, sector_num, nb_sectors);
2356 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
2358 if (flags & BDRV_REQ_ZERO_WRITE) {
2359 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
2360 } else {
2361 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
2364 if (ret == 0 && !bs->enable_write_cache) {
2365 ret = bdrv_co_flush(bs);
2368 if (bs->dirty_bitmap) {
2369 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2372 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
2373 bs->wr_highest_sector = sector_num + nb_sectors - 1;
2376 tracked_request_end(&req);
2378 return ret;
2381 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
2382 int nb_sectors, QEMUIOVector *qiov)
2384 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
2386 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
2389 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
2390 int64_t sector_num, int nb_sectors)
2392 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
2394 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
2395 BDRV_REQ_ZERO_WRITE);
2399 * Truncate file to 'offset' bytes (needed only for file protocols)
2401 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
2403 BlockDriver *drv = bs->drv;
2404 int ret;
2405 if (!drv)
2406 return -ENOMEDIUM;
2407 if (!drv->bdrv_truncate)
2408 return -ENOTSUP;
2409 if (bs->read_only)
2410 return -EACCES;
2411 if (bdrv_in_use(bs))
2412 return -EBUSY;
2413 ret = drv->bdrv_truncate(bs, offset);
2414 if (ret == 0) {
2415 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
2416 bdrv_dev_resize_cb(bs);
2418 return ret;
2422 * Length of an allocated file in bytes. Sparse files are counted by actual
2423 * allocated space. Return < 0 if error or unknown.
2425 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
2427 BlockDriver *drv = bs->drv;
2428 if (!drv) {
2429 return -ENOMEDIUM;
2431 if (drv->bdrv_get_allocated_file_size) {
2432 return drv->bdrv_get_allocated_file_size(bs);
2434 if (bs->file) {
2435 return bdrv_get_allocated_file_size(bs->file);
2437 return -ENOTSUP;
2441 * Length of a file in bytes. Return < 0 if error or unknown.
2443 int64_t bdrv_getlength(BlockDriverState *bs)
2445 BlockDriver *drv = bs->drv;
2446 if (!drv)
2447 return -ENOMEDIUM;
2449 if (bs->growable || bdrv_dev_has_removable_media(bs)) {
2450 if (drv->bdrv_getlength) {
2451 return drv->bdrv_getlength(bs);
2454 return bs->total_sectors * BDRV_SECTOR_SIZE;
2457 /* Return 0 as the number of sectors if no device is present or on error */
2458 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
2460 int64_t length;
2461 length = bdrv_getlength(bs);
2462 if (length < 0)
2463 length = 0;
2464 else
2465 length = length >> BDRV_SECTOR_BITS;
2466 *nb_sectors_ptr = length;
2469 /* throttling disk io limits */
2470 void bdrv_set_io_limits(BlockDriverState *bs,
2471 BlockIOLimit *io_limits)
2473 bs->io_limits = *io_limits;
2474 bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
2477 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
2478 BlockdevOnError on_write_error)
2480 bs->on_read_error = on_read_error;
2481 bs->on_write_error = on_write_error;
2484 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, int is_read)
2486 return is_read ? bs->on_read_error : bs->on_write_error;
2489 int bdrv_is_read_only(BlockDriverState *bs)
2491 return bs->read_only;
2494 int bdrv_is_sg(BlockDriverState *bs)
2496 return bs->sg;
2499 int bdrv_enable_write_cache(BlockDriverState *bs)
2501 return bs->enable_write_cache;
2504 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
2506 bs->enable_write_cache = wce;
2508 /* so a reopen() will preserve wce */
2509 if (wce) {
2510 bs->open_flags |= BDRV_O_CACHE_WB;
2511 } else {
2512 bs->open_flags &= ~BDRV_O_CACHE_WB;
2516 int bdrv_is_encrypted(BlockDriverState *bs)
2518 if (bs->backing_hd && bs->backing_hd->encrypted)
2519 return 1;
2520 return bs->encrypted;
2523 int bdrv_key_required(BlockDriverState *bs)
2525 BlockDriverState *backing_hd = bs->backing_hd;
2527 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
2528 return 1;
2529 return (bs->encrypted && !bs->valid_key);
2532 int bdrv_set_key(BlockDriverState *bs, const char *key)
2534 int ret;
2535 if (bs->backing_hd && bs->backing_hd->encrypted) {
2536 ret = bdrv_set_key(bs->backing_hd, key);
2537 if (ret < 0)
2538 return ret;
2539 if (!bs->encrypted)
2540 return 0;
2542 if (!bs->encrypted) {
2543 return -EINVAL;
2544 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
2545 return -ENOMEDIUM;
2547 ret = bs->drv->bdrv_set_key(bs, key);
2548 if (ret < 0) {
2549 bs->valid_key = 0;
2550 } else if (!bs->valid_key) {
2551 bs->valid_key = 1;
2552 /* call the change callback now, we skipped it on open */
2553 bdrv_dev_change_media_cb(bs, true);
2555 return ret;
2558 const char *bdrv_get_format_name(BlockDriverState *bs)
2560 return bs->drv ? bs->drv->format_name : NULL;
2563 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
2564 void *opaque)
2566 BlockDriver *drv;
2568 QLIST_FOREACH(drv, &bdrv_drivers, list) {
2569 it(opaque, drv->format_name);
2573 BlockDriverState *bdrv_find(const char *name)
2575 BlockDriverState *bs;
2577 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2578 if (!strcmp(name, bs->device_name)) {
2579 return bs;
2582 return NULL;
2585 BlockDriverState *bdrv_next(BlockDriverState *bs)
2587 if (!bs) {
2588 return QTAILQ_FIRST(&bdrv_states);
2590 return QTAILQ_NEXT(bs, list);
2593 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
2595 BlockDriverState *bs;
2597 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2598 it(opaque, bs);
2602 const char *bdrv_get_device_name(BlockDriverState *bs)
2604 return bs->device_name;
2607 int bdrv_get_flags(BlockDriverState *bs)
2609 return bs->open_flags;
2612 void bdrv_flush_all(void)
2614 BlockDriverState *bs;
2616 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2617 bdrv_flush(bs);
2621 int bdrv_has_zero_init(BlockDriverState *bs)
2623 assert(bs->drv);
2625 if (bs->drv->bdrv_has_zero_init) {
2626 return bs->drv->bdrv_has_zero_init(bs);
2629 return 1;
2632 typedef struct BdrvCoIsAllocatedData {
2633 BlockDriverState *bs;
2634 int64_t sector_num;
2635 int nb_sectors;
2636 int *pnum;
2637 int ret;
2638 bool done;
2639 } BdrvCoIsAllocatedData;
2642 * Returns true iff the specified sector is present in the disk image. Drivers
2643 * not implementing the functionality are assumed to not support backing files,
2644 * hence all their sectors are reported as allocated.
2646 * If 'sector_num' is beyond the end of the disk image the return value is 0
2647 * and 'pnum' is set to 0.
2649 * 'pnum' is set to the number of sectors (including and immediately following
2650 * the specified sector) that are known to be in the same
2651 * allocated/unallocated state.
2653 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2654 * beyond the end of the disk image it will be clamped.
2656 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
2657 int nb_sectors, int *pnum)
2659 int64_t n;
2661 if (sector_num >= bs->total_sectors) {
2662 *pnum = 0;
2663 return 0;
2666 n = bs->total_sectors - sector_num;
2667 if (n < nb_sectors) {
2668 nb_sectors = n;
2671 if (!bs->drv->bdrv_co_is_allocated) {
2672 *pnum = nb_sectors;
2673 return 1;
2676 return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
2679 /* Coroutine wrapper for bdrv_is_allocated() */
2680 static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
2682 BdrvCoIsAllocatedData *data = opaque;
2683 BlockDriverState *bs = data->bs;
2685 data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
2686 data->pnum);
2687 data->done = true;
2691 * Synchronous wrapper around bdrv_co_is_allocated().
2693 * See bdrv_co_is_allocated() for details.
2695 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
2696 int *pnum)
2698 Coroutine *co;
2699 BdrvCoIsAllocatedData data = {
2700 .bs = bs,
2701 .sector_num = sector_num,
2702 .nb_sectors = nb_sectors,
2703 .pnum = pnum,
2704 .done = false,
2707 co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
2708 qemu_coroutine_enter(co, &data);
2709 while (!data.done) {
2710 qemu_aio_wait();
2712 return data.ret;
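/* Sketch of a typical caller (hypothetical, not a call site in this
 * file): walk the allocation map of an image by advancing by *pnum after
 * each query. A real caller would also clamp the nb_sectors argument to
 * INT_MAX, since it is a plain int:
 *
 *     int64_t sector = 0;
 *     while (sector < total_sectors) {
 *         int num;
 *         int ret = bdrv_is_allocated(bs, sector, total_sectors - sector,
 *                                     &num);
 *         if (ret < 0) {
 *             break;
 *         }
 *         // [sector, sector + num) is allocated iff ret != 0
 *         sector += num;
 *     }
 */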
2716 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2718 * Return true if the given sector is allocated in any image between
2719 * BASE and TOP (TOP inclusive, BASE excluded). BASE can be NULL to check
2720 * if the given sector is allocated anywhere in the chain. Return false otherwise.
2722 * 'pnum' is set to the number of sectors (including and immediately following
2723 * the specified sector) that are known to be in the same
2724 * allocated/unallocated state.
2727 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
2728 BlockDriverState *base,
2729 int64_t sector_num,
2730 int nb_sectors, int *pnum)
2732 BlockDriverState *intermediate;
2733 int ret, n = nb_sectors;
2735 intermediate = top;
2736 while (intermediate && intermediate != base) {
2737 int pnum_inter;
2738 ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
2739 &pnum_inter);
2740 if (ret < 0) {
2741 return ret;
2742 } else if (ret) {
2743 *pnum = pnum_inter;
2744 return 1;
2748 * [sector_num, sector_num + pnum_inter) is unallocated in this layer,
2749 * but a deeper layer might still have part of
2751 * [sector_num + x, sector_num + nb_sectors) allocated, so clamp n to pnum_inter.
2753 if (n > pnum_inter) {
2754 n = pnum_inter;
2757 intermediate = intermediate->backing_hd;
2760 *pnum = n;
2761 return 0;
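/* Worked example for the chain [BASE] <- [INTER] <- [TOP]: a sector that
 * is unallocated in TOP but allocated in INTER makes the walk return 1
 * when it reaches INTER; a sector allocated only in BASE yields 0,
 * because the walk stops before the 'base' argument.
 */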
2764 BlockInfoList *qmp_query_block(Error **errp)
2766 BlockInfoList *head = NULL, *cur_item = NULL;
2767 BlockDriverState *bs;
2769 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2770 BlockInfoList *info = g_malloc0(sizeof(*info));
2772 info->value = g_malloc0(sizeof(*info->value));
2773 info->value->device = g_strdup(bs->device_name);
2774 info->value->type = g_strdup("unknown");
2775 info->value->locked = bdrv_dev_is_medium_locked(bs);
2776 info->value->removable = bdrv_dev_has_removable_media(bs);
2778 if (bdrv_dev_has_removable_media(bs)) {
2779 info->value->has_tray_open = true;
2780 info->value->tray_open = bdrv_dev_is_tray_open(bs);
2783 if (bdrv_iostatus_is_enabled(bs)) {
2784 info->value->has_io_status = true;
2785 info->value->io_status = bs->iostatus;
2788 if (bs->drv) {
2789 info->value->has_inserted = true;
2790 info->value->inserted = g_malloc0(sizeof(*info->value->inserted));
2791 info->value->inserted->file = g_strdup(bs->filename);
2792 info->value->inserted->ro = bs->read_only;
2793 info->value->inserted->drv = g_strdup(bs->drv->format_name);
2794 info->value->inserted->encrypted = bs->encrypted;
2795 info->value->inserted->encryption_key_missing = bdrv_key_required(bs);
2796 if (bs->backing_file[0]) {
2797 info->value->inserted->has_backing_file = true;
2798 info->value->inserted->backing_file = g_strdup(bs->backing_file);
2801 info->value->inserted->backing_file_depth =
2802 bdrv_get_backing_file_depth(bs);
2804 if (bs->io_limits_enabled) {
2805 info->value->inserted->bps =
2806 bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
2807 info->value->inserted->bps_rd =
2808 bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
2809 info->value->inserted->bps_wr =
2810 bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
2811 info->value->inserted->iops =
2812 bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
2813 info->value->inserted->iops_rd =
2814 bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
2815 info->value->inserted->iops_wr =
2816 bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
2820 /* XXX: waiting for the qapi to support GSList */
2821 if (!cur_item) {
2822 head = cur_item = info;
2823 } else {
2824 cur_item->next = info;
2825 cur_item = info;
2829 return head;
2832 /* Consider exposing this as a full fledged QMP command */
2833 static BlockStats *qmp_query_blockstat(const BlockDriverState *bs, Error **errp)
2835 BlockStats *s;
2837 s = g_malloc0(sizeof(*s));
2839 if (bs->device_name[0]) {
2840 s->has_device = true;
2841 s->device = g_strdup(bs->device_name);
2844 s->stats = g_malloc0(sizeof(*s->stats));
2845 s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
2846 s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
2847 s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
2848 s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
2849 s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
2850 s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
2851 s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
2852 s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
2853 s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];
2855 if (bs->file) {
2856 s->has_parent = true;
2857 s->parent = qmp_query_blockstat(bs->file, NULL);
2860 return s;
2863 BlockStatsList *qmp_query_blockstats(Error **errp)
2865 BlockStatsList *head = NULL, *cur_item = NULL;
2866 BlockDriverState *bs;
2868 QTAILQ_FOREACH(bs, &bdrv_states, list) {
2869 BlockStatsList *info = g_malloc0(sizeof(*info));
2870 info->value = qmp_query_blockstat(bs, NULL);
2872 /* XXX: waiting for the qapi to support GSList */
2873 if (!cur_item) {
2874 head = cur_item = info;
2875 } else {
2876 cur_item->next = info;
2877 cur_item = info;
2881 return head;
2884 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
2886 if (bs->backing_hd && bs->backing_hd->encrypted)
2887 return bs->backing_file;
2888 else if (bs->encrypted)
2889 return bs->filename;
2890 else
2891 return NULL;
2894 void bdrv_get_backing_filename(BlockDriverState *bs,
2895 char *filename, int filename_size)
2897 pstrcpy(filename, filename_size, bs->backing_file);
2900 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
2901 const uint8_t *buf, int nb_sectors)
2903 BlockDriver *drv = bs->drv;
2904 if (!drv)
2905 return -ENOMEDIUM;
2906 if (!drv->bdrv_write_compressed)
2907 return -ENOTSUP;
2908 if (bdrv_check_request(bs, sector_num, nb_sectors))
2909 return -EIO;
2911 if (bs->dirty_bitmap) {
2912 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
2915 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
2918 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
2920 BlockDriver *drv = bs->drv;
2921 if (!drv)
2922 return -ENOMEDIUM;
2923 if (!drv->bdrv_get_info)
2924 return -ENOTSUP;
2925 memset(bdi, 0, sizeof(*bdi));
2926 return drv->bdrv_get_info(bs, bdi);
2929 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2930 int64_t pos, int size)
2932 BlockDriver *drv = bs->drv;
2933 if (!drv)
2934 return -ENOMEDIUM;
2935 if (drv->bdrv_save_vmstate)
2936 return drv->bdrv_save_vmstate(bs, buf, pos, size);
2937 if (bs->file)
2938 return bdrv_save_vmstate(bs->file, buf, pos, size);
2939 return -ENOTSUP;
2942 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2943 int64_t pos, int size)
2945 BlockDriver *drv = bs->drv;
2946 if (!drv)
2947 return -ENOMEDIUM;
2948 if (drv->bdrv_load_vmstate)
2949 return drv->bdrv_load_vmstate(bs, buf, pos, size);
2950 if (bs->file)
2951 return bdrv_load_vmstate(bs->file, buf, pos, size);
2952 return -ENOTSUP;
2955 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
2957 BlockDriver *drv = bs->drv;
2959 if (!drv || !drv->bdrv_debug_event) {
2960 return;
2963 drv->bdrv_debug_event(bs, event);
2967 /**************************************************************/
2968 /* handling of snapshots */
2970 int bdrv_can_snapshot(BlockDriverState *bs)
2972 BlockDriver *drv = bs->drv;
2973 if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
2974 return 0;
2977 if (!drv->bdrv_snapshot_create) {
2978 if (bs->file != NULL) {
2979 return bdrv_can_snapshot(bs->file);
2981 return 0;
2984 return 1;
2987 int bdrv_is_snapshot(BlockDriverState *bs)
2989 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
2992 BlockDriverState *bdrv_snapshots(void)
2994 BlockDriverState *bs;
2996 if (bs_snapshots) {
2997 return bs_snapshots;
3000 bs = NULL;
3001 while ((bs = bdrv_next(bs))) {
3002 if (bdrv_can_snapshot(bs)) {
3003 bs_snapshots = bs;
3004 return bs;
3007 return NULL;
3010 int bdrv_snapshot_create(BlockDriverState *bs,
3011 QEMUSnapshotInfo *sn_info)
3013 BlockDriver *drv = bs->drv;
3014 if (!drv)
3015 return -ENOMEDIUM;
3016 if (drv->bdrv_snapshot_create)
3017 return drv->bdrv_snapshot_create(bs, sn_info);
3018 if (bs->file)
3019 return bdrv_snapshot_create(bs->file, sn_info);
3020 return -ENOTSUP;
3023 int bdrv_snapshot_goto(BlockDriverState *bs,
3024 const char *snapshot_id)
3026 BlockDriver *drv = bs->drv;
3027 int ret, open_ret;
3029 if (!drv)
3030 return -ENOMEDIUM;
3031 if (drv->bdrv_snapshot_goto)
3032 return drv->bdrv_snapshot_goto(bs, snapshot_id);
3034 if (bs->file) {
3035 drv->bdrv_close(bs);
3036 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
3037 open_ret = drv->bdrv_open(bs, bs->open_flags);
3038 if (open_ret < 0) {
3039 bdrv_delete(bs->file);
3040 bs->drv = NULL;
3041 return open_ret;
3043 return ret;
3046 return -ENOTSUP;
3049 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
3051 BlockDriver *drv = bs->drv;
3052 if (!drv)
3053 return -ENOMEDIUM;
3054 if (drv->bdrv_snapshot_delete)
3055 return drv->bdrv_snapshot_delete(bs, snapshot_id);
3056 if (bs->file)
3057 return bdrv_snapshot_delete(bs->file, snapshot_id);
3058 return -ENOTSUP;
3061 int bdrv_snapshot_list(BlockDriverState *bs,
3062 QEMUSnapshotInfo **psn_info)
3064 BlockDriver *drv = bs->drv;
3065 if (!drv)
3066 return -ENOMEDIUM;
3067 if (drv->bdrv_snapshot_list)
3068 return drv->bdrv_snapshot_list(bs, psn_info);
3069 if (bs->file)
3070 return bdrv_snapshot_list(bs->file, psn_info);
3071 return -ENOTSUP;
3074 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
3075 const char *snapshot_name)
3077 BlockDriver *drv = bs->drv;
3078 if (!drv) {
3079 return -ENOMEDIUM;
3081 if (!bs->read_only) {
3082 return -EINVAL;
3084 if (drv->bdrv_snapshot_load_tmp) {
3085 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
3087 return -ENOTSUP;
3090 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
3091 const char *backing_file)
3093 if (!bs->drv) {
3094 return NULL;
3097 if (bs->backing_hd) {
3098 if (strcmp(bs->backing_file, backing_file) == 0) {
3099 return bs->backing_hd;
3100 } else {
3101 return bdrv_find_backing_image(bs->backing_hd, backing_file);
3105 return NULL;
3108 int bdrv_get_backing_file_depth(BlockDriverState *bs)
3110 if (!bs->drv) {
3111 return 0;
3114 if (!bs->backing_hd) {
3115 return 0;
3118 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
3121 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
3123 BlockDriverState *curr_bs = NULL;
3125 if (!bs) {
3126 return NULL;
3129 curr_bs = bs;
3131 while (curr_bs->backing_hd) {
3132 curr_bs = curr_bs->backing_hd;
3134 return curr_bs;
3137 #define NB_SUFFIXES 4
3139 char *get_human_readable_size(char *buf, int buf_size, int64_t size)
3141 static const char suffixes[NB_SUFFIXES] = "KMGT";
3142 int64_t base;
3143 int i;
3145 if (size <= 999) {
3146 snprintf(buf, buf_size, "%" PRId64, size);
3147 } else {
3148 base = 1024;
3149 for(i = 0; i < NB_SUFFIXES; i++) {
3150 if (size < (10 * base)) {
3151 snprintf(buf, buf_size, "%0.1f%c",
3152 (double)size / base,
3153 suffixes[i]);
3154 break;
3155 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
3156 snprintf(buf, buf_size, "%" PRId64 "%c",
3157 ((size + (base >> 1)) / base),
3158 suffixes[i]);
3159 break;
3161 base = base * 1024;
3164 return buf;
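/* Illustrative results: 512 -> "512" (sizes <= 999 are printed verbatim);
 * 1536 -> "1.5K" (one decimal while size < 10 * base); 15 GiB -> "15G"
 * (rounded via (size + base / 2) / base). The suffix table stops at 'T',
 * so very large sizes are still printed with the 'T' suffix.
 */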
3167 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
3169 char buf1[128], date_buf[128], clock_buf[128];
3170 #ifdef _WIN32
3171 struct tm *ptm;
3172 #else
3173 struct tm tm;
3174 #endif
3175 time_t ti;
3176 int64_t secs;
3178 if (!sn) {
3179 snprintf(buf, buf_size,
3180 "%-10s%-20s%7s%20s%15s",
3181 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
3182 } else {
3183 ti = sn->date_sec;
3184 #ifdef _WIN32
3185 ptm = localtime(&ti);
3186 strftime(date_buf, sizeof(date_buf),
3187 "%Y-%m-%d %H:%M:%S", ptm);
3188 #else
3189 localtime_r(&ti, &tm);
3190 strftime(date_buf, sizeof(date_buf),
3191 "%Y-%m-%d %H:%M:%S", &tm);
3192 #endif
3193 secs = sn->vm_clock_nsec / 1000000000;
3194 snprintf(clock_buf, sizeof(clock_buf),
3195 "%02d:%02d:%02d.%03d",
3196 (int)(secs / 3600),
3197 (int)((secs / 60) % 60),
3198 (int)(secs % 60),
3199 (int)((sn->vm_clock_nsec / 1000000) % 1000));
3200 snprintf(buf, buf_size,
3201 "%-10s%-20s%7s%20s%15s",
3202 sn->id_str, sn->name,
3203 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
3204 date_buf,
3205 clock_buf);
3207 return buf;
3210 /**************************************************************/
3211 /* async I/Os */
3213 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
3214 QEMUIOVector *qiov, int nb_sectors,
3215 BlockDriverCompletionFunc *cb, void *opaque)
3217 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
3219 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
3220 cb, opaque, false);
3223 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
3224 QEMUIOVector *qiov, int nb_sectors,
3225 BlockDriverCompletionFunc *cb, void *opaque)
3227 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
3229 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
3230 cb, opaque, true);
3234 typedef struct MultiwriteCB {
3235 int error;
3236 int num_requests;
3237 int num_callbacks;
3238 struct {
3239 BlockDriverCompletionFunc *cb;
3240 void *opaque;
3241 QEMUIOVector *free_qiov;
3242 } callbacks[];
3243 } MultiwriteCB;
3245 static void multiwrite_user_cb(MultiwriteCB *mcb)
3247 int i;
3249 for (i = 0; i < mcb->num_callbacks; i++) {
3250 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
3251 if (mcb->callbacks[i].free_qiov) {
3252 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
3254 g_free(mcb->callbacks[i].free_qiov);
3258 static void multiwrite_cb(void *opaque, int ret)
3260 MultiwriteCB *mcb = opaque;
3262 trace_multiwrite_cb(mcb, ret);
3264 if (ret < 0 && !mcb->error) {
3265 mcb->error = ret;
3268 mcb->num_requests--;
3269 if (mcb->num_requests == 0) {
3270 multiwrite_user_cb(mcb);
3271 g_free(mcb);
3275 static int multiwrite_req_compare(const void *a, const void *b)
3277 const BlockRequest *req1 = a, *req2 = b;
3280 * Note that we can't simply subtract req2->sector from req1->sector
3281 * here as that could overflow the return value.
3283 if (req1->sector > req2->sector) {
3284 return 1;
3285 } else if (req1->sector < req2->sector) {
3286 return -1;
3287 } else {
3288 return 0;
3293 * Takes a bunch of requests and tries to merge them. Returns the number of
3294 * requests that remain after merging.
3296 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
3297 int num_reqs, MultiwriteCB *mcb)
3299 int i, outidx;
3301 // Sort requests by start sector
3302 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
3304 // Check if adjacent requests can be combined. Only exactly sequential or
3305 // overlapping writes are merged; gaps between requests are never zero-filled.
3306 outidx = 0;
3307 for (i = 1; i < num_reqs; i++) {
3308 int merge = 0;
3309 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
3311 // Handle exactly sequential writes and overlapping writes.
3312 if (reqs[i].sector <= oldreq_last) {
3313 merge = 1;
3316 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
3317 merge = 0;
3320 if (merge) {
3321 size_t size;
3322 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
3323 qemu_iovec_init(qiov,
3324 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
3326 // Add the first request to the merged one. If the requests are
3327 // overlapping, drop the last sectors of the first request.
3328 size = (reqs[i].sector - reqs[outidx].sector) << 9;
3329 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
3331 // We shouldn't need to add any zeros between the two requests
3332 assert (reqs[i].sector <= oldreq_last);
3334 // Add the second request
3335 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
3337 reqs[outidx].nb_sectors = qiov->size >> 9;
3338 reqs[outidx].qiov = qiov;
3340 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
3341 } else {
3342 outidx++;
3343 reqs[outidx].sector = reqs[i].sector;
3344 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
3345 reqs[outidx].qiov = reqs[i].qiov;
3349 return outidx + 1;
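/* Worked example: two sorted requests A = {sector 0, nb_sectors 8} and
 * B = {sector 8, nb_sectors 8} are exactly sequential (B.sector equals
 * oldreq_last), so they merge into one request {sector 0, nb_sectors 16}
 * whose qiov concatenates both vectors, and the function returns 1
 * instead of 2.
 */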
3353 * Submit multiple AIO write requests at once.
3355 * On success, the function returns 0 and all requests in the reqs array have
3356 * been submitted. In the error case this function returns -1, and any of the
3357 * requests may or may not have been submitted yet. In particular, this means that the
3358 * callback will be called for some of the requests, for others it won't. The
3359 * caller must check the error field of the BlockRequest to wait for the right
3360 * callbacks (if error != 0, no callback will be called).
3362 * The implementation may modify the contents of the reqs array, e.g. to merge
3363 * requests. However, the fields opaque and error are left unmodified as they
3364 * are used to signal failure for a single request to the caller.
3366 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
3368 MultiwriteCB *mcb;
3369 int i;
3371 /* don't submit writes if we don't have a medium */
3372 if (bs->drv == NULL) {
3373 for (i = 0; i < num_reqs; i++) {
3374 reqs[i].error = -ENOMEDIUM;
3376 return -1;
3379 if (num_reqs == 0) {
3380 return 0;
3383 // Create MultiwriteCB structure
3384 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
3385 mcb->num_requests = 0;
3386 mcb->num_callbacks = num_reqs;
3388 for (i = 0; i < num_reqs; i++) {
3389 mcb->callbacks[i].cb = reqs[i].cb;
3390 mcb->callbacks[i].opaque = reqs[i].opaque;
3393 // Check for mergeable requests
3394 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
3396 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
3398 /* Run the aio requests. */
3399 mcb->num_requests = num_reqs;
3400 for (i = 0; i < num_reqs; i++) {
3401 bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
3402 reqs[i].nb_sectors, multiwrite_cb, mcb);
3405 return 0;
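/* Hypothetical caller sketch (virtio-blk batches its writes this way;
 * my_cb, qiov0/qiov1 and req0/req1 are placeholders for caller state):
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_cb, .opaque = req0 },
 *         { .sector = 16, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_cb, .opaque = req1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // only requests with reqs[i].error == 0 will see a callback
 *     }
 */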
3408 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
3410 acb->pool->cancel(acb);
3413 /* block I/O throttling */
3414 static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
3415 bool is_write, double elapsed_time, uint64_t *wait)
3417 uint64_t bps_limit = 0;
3418 double bytes_limit, bytes_base, bytes_res;
3419 double slice_time, wait_time;
3421 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3422 bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
3423 } else if (bs->io_limits.bps[is_write]) {
3424 bps_limit = bs->io_limits.bps[is_write];
3425 } else {
3426 if (wait) {
3427 *wait = 0;
3430 return false;
3433 slice_time = bs->slice_end - bs->slice_start;
3434 slice_time /= (NANOSECONDS_PER_SECOND);
3435 bytes_limit = bps_limit * slice_time;
3436 bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
3437 if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
3438 bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
3441 /* bytes_base: the number of bytes already read/written, obtained
3442 * from the history statistics.
3443 * bytes_res: the remaining bytes of data which need to be read/written.
3444 * (bytes_base + bytes_res) / bps_limit: used to calculate
3445 * the total time for completing reading/writing all data.
3447 bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
3449 if (bytes_base + bytes_res <= bytes_limit) {
3450 if (wait) {
3451 *wait = 0;
3454 return false;
3457 /* Calc approx time to dispatch */
3458 wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
3460 /* When the I/O rate at runtime exceeds the limit,
3461 * bs->slice_end needs to be extended so that the current statistics
3462 * are kept until the timer fires; the factor below was increased and
3463 * tuned based on experimental results.
3465 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3466 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3467 if (wait) {
3468 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3471 return true;
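/* Numeric example: with bps_limit = 10 MB/s and a 0.1 s slice,
 * bytes_limit is 1 MB. If 900 KB were already transferred in this slice
 * and a 256 KB request arrives, bytes_base + bytes_res exceeds the
 * limit, so the request waits roughly
 * (900 KB + 256 KB) / (10 MB/s) - elapsed_time seconds before dispatch.
 */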
3474 static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
3475 double elapsed_time, uint64_t *wait)
3477 uint64_t iops_limit = 0;
3478 double ios_limit, ios_base;
3479 double slice_time, wait_time;
3481 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3482 iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
3483 } else if (bs->io_limits.iops[is_write]) {
3484 iops_limit = bs->io_limits.iops[is_write];
3485 } else {
3486 if (wait) {
3487 *wait = 0;
3490 return false;
3493 slice_time = bs->slice_end - bs->slice_start;
3494 slice_time /= (NANOSECONDS_PER_SECOND);
3495 ios_limit = iops_limit * slice_time;
3496 ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
3497 if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
3498 ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
3501 if (ios_base + 1 <= ios_limit) {
3502 if (wait) {
3503 *wait = 0;
3506 return false;
3509 /* Calc approx time to dispatch */
3510 wait_time = (ios_base + 1) / iops_limit;
3511 if (wait_time > elapsed_time) {
3512 wait_time = wait_time - elapsed_time;
3513 } else {
3514 wait_time = 0;
3517 bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
3518 bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
3519 if (wait) {
3520 *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
3523 return true;
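/* Numeric example: with iops_limit = 100 and 12 requests already issued
 * in this slice, the next request may dispatch no earlier than
 * (12 + 1) / 100 = 0.13 s into the slice; if only 0.05 s have elapsed,
 * it waits the remaining 0.08 s.
 */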
3526 static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
3527 bool is_write, int64_t *wait)
3529 int64_t now, max_wait;
3530 uint64_t bps_wait = 0, iops_wait = 0;
3531 double elapsed_time;
3532 int bps_ret, iops_ret;
3534 now = qemu_get_clock_ns(vm_clock);
3535 if ((bs->slice_start < now)
3536 && (bs->slice_end > now)) {
3537 bs->slice_end = now + bs->slice_time;
3538 } else {
3539 bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
3540 bs->slice_start = now;
3541 bs->slice_end = now + bs->slice_time;
3543 bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
3544 bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
3546 bs->io_base.ios[is_write] = bs->nr_ops[is_write];
3547 bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
3550 elapsed_time = now - bs->slice_start;
3551 elapsed_time /= (NANOSECONDS_PER_SECOND);
3553 bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
3554 is_write, elapsed_time, &bps_wait);
3555 iops_ret = bdrv_exceed_iops_limits(bs, is_write,
3556 elapsed_time, &iops_wait);
3557 if (bps_ret || iops_ret) {
3558 max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
3559 if (wait) {
3560 *wait = max_wait;
3563 now = qemu_get_clock_ns(vm_clock);
3564 if (bs->slice_end < now + max_wait) {
3565 bs->slice_end = now + max_wait;
3568 return true;
3571 if (wait) {
3572 *wait = 0;
3575 return false;
3578 /**************************************************************/
3579 /* async block device emulation */
3581 typedef struct BlockDriverAIOCBSync {
3582 BlockDriverAIOCB common;
3583 QEMUBH *bh;
3584 int ret;
3585 /* vector translation state */
3586 QEMUIOVector *qiov;
3587 uint8_t *bounce;
3588 int is_write;
3589 } BlockDriverAIOCBSync;
3591 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
3593 BlockDriverAIOCBSync *acb =
3594 container_of(blockacb, BlockDriverAIOCBSync, common);
3595 qemu_bh_delete(acb->bh);
3596 acb->bh = NULL;
3597 qemu_aio_release(acb);
3600 static AIOPool bdrv_em_aio_pool = {
3601 .aiocb_size = sizeof(BlockDriverAIOCBSync),
3602 .cancel = bdrv_aio_cancel_em,
3605 static void bdrv_aio_bh_cb(void *opaque)
3607 BlockDriverAIOCBSync *acb = opaque;
3609 if (!acb->is_write)
3610 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
3611 qemu_vfree(acb->bounce);
3612 acb->common.cb(acb->common.opaque, acb->ret);
3613 qemu_bh_delete(acb->bh);
3614 acb->bh = NULL;
3615 qemu_aio_release(acb);
3618 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
3619 int64_t sector_num,
3620 QEMUIOVector *qiov,
3621 int nb_sectors,
3622 BlockDriverCompletionFunc *cb,
3623 void *opaque,
3624 int is_write)
3627 BlockDriverAIOCBSync *acb;
3629 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
3630 acb->is_write = is_write;
3631 acb->qiov = qiov;
3632 acb->bounce = qemu_blockalign(bs, qiov->size);
3633 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
3635 if (is_write) {
3636 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
3637 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
3638 } else {
3639 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
3642 qemu_bh_schedule(acb->bh);
3644 return &acb->common;
3647 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
3648 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3649 BlockDriverCompletionFunc *cb, void *opaque)
3651 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
3654 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
3655 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
3656 BlockDriverCompletionFunc *cb, void *opaque)
3658 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
3662 typedef struct BlockDriverAIOCBCoroutine {
3663 BlockDriverAIOCB common;
3664 BlockRequest req;
3665 bool is_write;
3666 QEMUBH* bh;
3667 } BlockDriverAIOCBCoroutine;
3669 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
3671 qemu_aio_flush();
3674 static AIOPool bdrv_em_co_aio_pool = {
3675 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
3676 .cancel = bdrv_aio_co_cancel_em,
3679 static void bdrv_co_em_bh(void *opaque)
3681 BlockDriverAIOCBCoroutine *acb = opaque;
3683 acb->common.cb(acb->common.opaque, acb->req.error);
3684 qemu_bh_delete(acb->bh);
3685 qemu_aio_release(acb);
3688 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3689 static void coroutine_fn bdrv_co_do_rw(void *opaque)
3691 BlockDriverAIOCBCoroutine *acb = opaque;
3692 BlockDriverState *bs = acb->common.bs;
3694 if (!acb->is_write) {
3695 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
3696 acb->req.nb_sectors, acb->req.qiov, 0);
3697 } else {
3698 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
3699 acb->req.nb_sectors, acb->req.qiov, 0);
3702 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3703 qemu_bh_schedule(acb->bh);
3706 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
3707 int64_t sector_num,
3708 QEMUIOVector *qiov,
3709 int nb_sectors,
3710 BlockDriverCompletionFunc *cb,
3711 void *opaque,
3712 bool is_write)
3714 Coroutine *co;
3715 BlockDriverAIOCBCoroutine *acb;
3717 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3718 acb->req.sector = sector_num;
3719 acb->req.nb_sectors = nb_sectors;
3720 acb->req.qiov = qiov;
3721 acb->is_write = is_write;
3723 co = qemu_coroutine_create(bdrv_co_do_rw);
3724 qemu_coroutine_enter(co, acb);
3726 return &acb->common;
3729 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
3731 BlockDriverAIOCBCoroutine *acb = opaque;
3732 BlockDriverState *bs = acb->common.bs;
3734 acb->req.error = bdrv_co_flush(bs);
3735 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3736 qemu_bh_schedule(acb->bh);
3739 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
3740 BlockDriverCompletionFunc *cb, void *opaque)
3742 trace_bdrv_aio_flush(bs, opaque);
3744 Coroutine *co;
3745 BlockDriverAIOCBCoroutine *acb;
3747 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3748 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
3749 qemu_coroutine_enter(co, acb);
3751 return &acb->common;
3754 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
3756 BlockDriverAIOCBCoroutine *acb = opaque;
3757 BlockDriverState *bs = acb->common.bs;
3759 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
3760 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
3761 qemu_bh_schedule(acb->bh);
3764 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
3765 int64_t sector_num, int nb_sectors,
3766 BlockDriverCompletionFunc *cb, void *opaque)
3768 Coroutine *co;
3769 BlockDriverAIOCBCoroutine *acb;
3771 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
3773 acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
3774 acb->req.sector = sector_num;
3775 acb->req.nb_sectors = nb_sectors;
3776 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
3777 qemu_coroutine_enter(co, acb);
3779 return &acb->common;
3782 void bdrv_init(void)
3784 module_call_init(MODULE_INIT_BLOCK);
3787 void bdrv_init_with_whitelist(void)
3789 use_bdrv_whitelist = 1;
3790 bdrv_init();
3793 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
3794 BlockDriverCompletionFunc *cb, void *opaque)
3796 BlockDriverAIOCB *acb;
3798 if (pool->free_aiocb) {
3799 acb = pool->free_aiocb;
3800 pool->free_aiocb = acb->next;
3801 } else {
3802 acb = g_malloc0(pool->aiocb_size);
3803 acb->pool = pool;
3805 acb->bs = bs;
3806 acb->cb = cb;
3807 acb->opaque = opaque;
3808 return acb;
3811 void qemu_aio_release(void *p)
3813 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
3814 AIOPool *pool = acb->pool;
3815 acb->next = pool->free_aiocb;
3816 pool->free_aiocb = acb;
3819 /**************************************************************/
3820 /* Coroutine block device emulation */
3822 typedef struct CoroutineIOCompletion {
3823 Coroutine *coroutine;
3824 int ret;
3825 } CoroutineIOCompletion;
3827 static void bdrv_co_io_em_complete(void *opaque, int ret)
3829 CoroutineIOCompletion *co = opaque;
3831 co->ret = ret;
3832 qemu_coroutine_enter(co->coroutine, NULL);
3835 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
3836 int nb_sectors, QEMUIOVector *iov,
3837 bool is_write)
3839 CoroutineIOCompletion co = {
3840 .coroutine = qemu_coroutine_self(),
3842 BlockDriverAIOCB *acb;
3844 if (is_write) {
3845 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
3846 bdrv_co_io_em_complete, &co);
3847 } else {
3848 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
3849 bdrv_co_io_em_complete, &co);
3852 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
3853 if (!acb) {
3854 return -EIO;
3856 qemu_coroutine_yield();
3858 return co.ret;
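/* This is the generic AIO-to-coroutine bridge used in this file: submit
 * the driver's AIO request with bdrv_co_io_em_complete() as the callback,
 * yield, and get re-entered by that callback once the request completes,
 * at which point co.ret holds the result. bdrv_co_flush() and
 * bdrv_co_discard() below reuse the same CoroutineIOCompletion pattern.
 */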
3861 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
3862 int64_t sector_num, int nb_sectors,
3863 QEMUIOVector *iov)
3865 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
3868 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
3869 int64_t sector_num, int nb_sectors,
3870 QEMUIOVector *iov)
3872 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
3875 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
3877 RwCo *rwco = opaque;
3879 rwco->ret = bdrv_co_flush(rwco->bs);
3882 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3884 int ret;
3886 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
3887 return 0;
3890 /* Write back cached data to the OS even with cache=unsafe */
3891 if (bs->drv->bdrv_co_flush_to_os) {
3892 ret = bs->drv->bdrv_co_flush_to_os(bs);
3893 if (ret < 0) {
3894 return ret;
3898 /* But don't actually force it to the disk with cache=unsafe */
3899 if (bs->open_flags & BDRV_O_NO_FLUSH) {
3900 goto flush_parent;
3903 if (bs->drv->bdrv_co_flush_to_disk) {
3904 ret = bs->drv->bdrv_co_flush_to_disk(bs);
3905 } else if (bs->drv->bdrv_aio_flush) {
3906 BlockDriverAIOCB *acb;
3907 CoroutineIOCompletion co = {
3908 .coroutine = qemu_coroutine_self(),
3911 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3912 if (acb == NULL) {
3913 ret = -EIO;
3914 } else {
3915 qemu_coroutine_yield();
3916 ret = co.ret;
3918 } else {
3920 * Some block drivers always operate in either writethrough or unsafe
3921 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3922 * know how the server works (because the behaviour is hardcoded or
3923 * depends on server-side configuration), so we can't ensure that
3924 * everything is safe on disk. Returning an error doesn't work because
3925 * that would break guests even if the server operates in writethrough
3926 * mode.
3928 * Let's hope the user knows what they're doing.
3930 ret = 0;
3932 if (ret < 0) {
3933 return ret;
3936 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
3937 * in the case of cache=unsafe, so there are no useless flushes.
3939 flush_parent:
3940 return bdrv_co_flush(bs->file);
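/* Relation to the cache= modes (a summary, assuming the usual flag
 * encoding where cache=unsafe sets BDRV_O_NO_FLUSH):
 *
 *   cache=writethrough: bdrv_co_do_writev() already flushes after every
 *       write, so this function rarely has dirty data left to push;
 *   cache=writeback / cache=none: data is pushed to the OS
 *       (bdrv_co_flush_to_os) and then forced to the disk
 *       (bdrv_co_flush_to_disk or bdrv_aio_flush);
 *   cache=unsafe: BDRV_O_NO_FLUSH makes this function skip the disk
 *       flush and only recurse into bs->file.
 */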
3943 void bdrv_invalidate_cache(BlockDriverState *bs)
3945 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
3946 bs->drv->bdrv_invalidate_cache(bs);
3950 void bdrv_invalidate_cache_all(void)
3952 BlockDriverState *bs;
3954 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3955 bdrv_invalidate_cache(bs);
3959 void bdrv_clear_incoming_migration_all(void)
3961 BlockDriverState *bs;
3963 QTAILQ_FOREACH(bs, &bdrv_states, list) {
3964 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
3968 int bdrv_flush(BlockDriverState *bs)
3970 Coroutine *co;
3971 RwCo rwco = {
3972 .bs = bs,
3973 .ret = NOT_DONE,
3976 if (qemu_in_coroutine()) {
3977 /* Fast-path if already in coroutine context */
3978 bdrv_flush_co_entry(&rwco);
3979 } else {
3980 co = qemu_coroutine_create(bdrv_flush_co_entry);
3981 qemu_coroutine_enter(co, &rwco);
3982 while (rwco.ret == NOT_DONE) {
3983 qemu_aio_wait();
3987 return rwco.ret;
3990 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
3992 RwCo *rwco = opaque;
3994 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
3997 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
3998 int nb_sectors)
4000 if (!bs->drv) {
4001 return -ENOMEDIUM;
4002 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
4003 return -EIO;
4004 } else if (bs->read_only) {
4005 return -EROFS;
4006 } else if (bs->drv->bdrv_co_discard) {
4007 return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
4008 } else if (bs->drv->bdrv_aio_discard) {
4009 BlockDriverAIOCB *acb;
4010 CoroutineIOCompletion co = {
4011 .coroutine = qemu_coroutine_self(),
4014 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
4015 bdrv_co_io_em_complete, &co);
4016 if (acb == NULL) {
4017 return -EIO;
4018 } else {
4019 qemu_coroutine_yield();
4020 return co.ret;
4022 } else {
4023 return 0;
4027 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
4029 Coroutine *co;
4030 RwCo rwco = {
4031 .bs = bs,
4032 .sector_num = sector_num,
4033 .nb_sectors = nb_sectors,
4034 .ret = NOT_DONE,
4037 if (qemu_in_coroutine()) {
4038 /* Fast-path if already in coroutine context */
4039 bdrv_discard_co_entry(&rwco);
4040 } else {
4041 co = qemu_coroutine_create(bdrv_discard_co_entry);
4042 qemu_coroutine_enter(co, &rwco);
4043 while (rwco.ret == NOT_DONE) {
4044 qemu_aio_wait();
4048 return rwco.ret;
4051 /**************************************************************/
4052 /* removable device support */
4055 * Return TRUE if the media is present
4057 int bdrv_is_inserted(BlockDriverState *bs)
4059 BlockDriver *drv = bs->drv;
4061 if (!drv)
4062 return 0;
4063 if (!drv->bdrv_is_inserted)
4064 return 1;
4065 return drv->bdrv_is_inserted(bs);
4069 * Return whether the media changed since the last call to this
4070 * function, or -ENOTSUP if we don't know. Most drivers don't know.
4072 int bdrv_media_changed(BlockDriverState *bs)
4074 BlockDriver *drv = bs->drv;
4076 if (drv && drv->bdrv_media_changed) {
4077 return drv->bdrv_media_changed(bs);
4079 return -ENOTSUP;
4083 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
4085 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
4087 BlockDriver *drv = bs->drv;
4089 if (drv && drv->bdrv_eject) {
4090 drv->bdrv_eject(bs, eject_flag);
4093 if (bs->device_name[0] != '\0') {
4094 bdrv_emit_qmp_eject_event(bs, eject_flag);
4099 * Lock or unlock the media (if it is locked, the user won't be able
4100 * to eject it manually).
4102 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
4104 BlockDriver *drv = bs->drv;
4106 trace_bdrv_lock_medium(bs, locked);
4108 if (drv && drv->bdrv_lock_medium) {
4109 drv->bdrv_lock_medium(bs, locked);
4113 /* needed for generic scsi interface */
4115 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
4117 BlockDriver *drv = bs->drv;
4119 if (drv && drv->bdrv_ioctl)
4120 return drv->bdrv_ioctl(bs, req, buf);
4121 return -ENOTSUP;
4124 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
4125 unsigned long int req, void *buf,
4126 BlockDriverCompletionFunc *cb, void *opaque)
4128 BlockDriver *drv = bs->drv;
4130 if (drv && drv->bdrv_aio_ioctl)
4131 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
4132 return NULL;
4135 void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
4137 bs->buffer_alignment = align;
4140 void *qemu_blockalign(BlockDriverState *bs, size_t size)
4142 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
4145 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
4147 int64_t bitmap_size;
4149 bs->dirty_count = 0;
4150 if (enable) {
4151 if (!bs->dirty_bitmap) {
4152 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
4153 BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG - 1;
4154 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * BITS_PER_LONG;
4156 bs->dirty_bitmap = g_new0(unsigned long, bitmap_size);
4158 } else {
4159 if (bs->dirty_bitmap) {
4160 g_free(bs->dirty_bitmap);
4161 bs->dirty_bitmap = NULL;
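/* Sizing example (assuming BDRV_SECTORS_PER_DIRTY_CHUNK = 2048, i.e.
 * 1 MiB chunks, and 64-bit longs): a 4 GiB image has 8388608 sectors,
 * i.e. 4096 chunks; one bit per chunk packed into unsigned longs gives
 * a bitmap of 4096 / 64 = 64 longs (512 bytes).
 */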
4166 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
4168 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
4170 if (bs->dirty_bitmap &&
4171 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
4172 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
4173 (1UL << (chunk % (sizeof(unsigned long) * 8))));
4174 } else {
4175 return 0;
4179 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
4180 int nr_sectors)
4182 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
4185 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
4187 return bs->dirty_count;
4190 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
4192 assert(bs->in_use != in_use);
4193 bs->in_use = in_use;
4196 int bdrv_in_use(BlockDriverState *bs)
4198 return bs->in_use;
4201 void bdrv_iostatus_enable(BlockDriverState *bs)
4203 bs->iostatus_enabled = true;
4204 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
4207 /* The I/O status is only enabled if the drive explicitly
4208 * enables it _and_ the VM is configured to stop on errors */
4209 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
4211 return (bs->iostatus_enabled &&
4212 (bs->on_write_error == BLOCK_ERR_STOP_ENOSPC ||
4213 bs->on_write_error == BLOCK_ERR_STOP_ANY ||
4214 bs->on_read_error == BLOCK_ERR_STOP_ANY));
4217 void bdrv_iostatus_disable(BlockDriverState *bs)
4219 bs->iostatus_enabled = false;
4222 void bdrv_iostatus_reset(BlockDriverState *bs)
4224 if (bdrv_iostatus_is_enabled(bs)) {
4225 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
4229 /* XXX: Today this is set by device models because it makes the implementation
4230 quite simple. However, the block layer knows about the error, so it's
4231 possible to implement this without device models being involved */
4232 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
4234 if (bdrv_iostatus_is_enabled(bs) &&
4235 bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
4236 assert(error >= 0);
4237 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
4238 BLOCK_DEVICE_IO_STATUS_FAILED;
4242 void
4243 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
4244 enum BlockAcctType type)
4246 assert(type < BDRV_MAX_IOTYPE);
4248 cookie->bytes = bytes;
4249 cookie->start_time_ns = get_clock();
4250 cookie->type = type;
4253 void
4254 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
4256 assert(cookie->type < BDRV_MAX_IOTYPE);
4258 bs->nr_bytes[cookie->type] += cookie->bytes;
4259 bs->nr_ops[cookie->type]++;
4260 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
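/* Typical device-model usage (a sketch; the surrounding request handling
 * is the caller's own):
 *
 *     BlockAcctCookie acct;
 *     bdrv_acct_start(bs, &acct, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     ...issue the read, then in its completion path...
 *     bdrv_acct_done(bs, &acct);
 *
 * The cookie carries the byte count and start timestamp between the two
 * calls, keeping the counters reported by qmp_query_blockstat() coherent.
 */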
4263 int bdrv_img_create(const char *filename, const char *fmt,
4264 const char *base_filename, const char *base_fmt,
4265 char *options, uint64_t img_size, int flags)
4267 QEMUOptionParameter *param = NULL, *create_options = NULL;
4268 QEMUOptionParameter *backing_fmt, *backing_file, *size;
4269 BlockDriverState *bs = NULL;
4270 BlockDriver *drv, *proto_drv;
4271 BlockDriver *backing_drv = NULL;
4272 int ret = 0;
4274 /* Find driver and parse its options */
4275 drv = bdrv_find_format(fmt);
4276 if (!drv) {
4277 error_report("Unknown file format '%s'", fmt);
4278 ret = -EINVAL;
4279 goto out;
4282 proto_drv = bdrv_find_protocol(filename);
4283 if (!proto_drv) {
4284 error_report("Unknown protocol '%s'", filename);
4285 ret = -EINVAL;
4286 goto out;
4289 create_options = append_option_parameters(create_options,
4290 drv->create_options);
4291 create_options = append_option_parameters(create_options,
4292 proto_drv->create_options);
4294 /* Create parameter list with default values */
4295 param = parse_option_parameters("", create_options, param);
4297 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
4299 /* Parse -o options */
4300 if (options) {
4301 param = parse_option_parameters(options, create_options, param);
4302 if (param == NULL) {
4303 error_report("Invalid options for file format '%s'.", fmt);
4304 ret = -EINVAL;
4305 goto out;
4309 if (base_filename) {
4310 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
4311 base_filename)) {
4312 error_report("Backing file not supported for file format '%s'",
4313 fmt);
4314 ret = -EINVAL;
4315 goto out;
4319 if (base_fmt) {
4320 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
4321 error_report("Backing file format not supported for file "
4322 "format '%s'", fmt);
4323 ret = -EINVAL;
4324 goto out;
4328 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
4329 if (backing_file && backing_file->value.s) {
4330 if (!strcmp(filename, backing_file->value.s)) {
4331 error_report("Error: Trying to create an image with the "
4332 "same filename as the backing file");
4333 ret = -EINVAL;
4334 goto out;
4338 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
4339 if (backing_fmt && backing_fmt->value.s) {
4340 backing_drv = bdrv_find_format(backing_fmt->value.s);
4341 if (!backing_drv) {
4342 error_report("Unknown backing file format '%s'",
4343 backing_fmt->value.s);
4344 ret = -EINVAL;
4345 goto out;
4349 // The size for the image must always be specified, with one exception:
4350 // If we are using a backing file, we can obtain the size from there
4351 size = get_option_parameter(param, BLOCK_OPT_SIZE);
4352 if (size && size->value.n == -1) {
4353 if (backing_file && backing_file->value.s) {
4354 uint64_t size;
4355 char buf[32];
4356 int back_flags;
4358 /* backing files always opened read-only */
4359 back_flags =
4360 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
4362 bs = bdrv_new("");
4364 ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
4365 if (ret < 0) {
4366 error_report("Could not open '%s'", backing_file->value.s);
4367 goto out;
4369 bdrv_get_geometry(bs, &size);
4370 size *= 512;
4372 snprintf(buf, sizeof(buf), "%" PRId64, size);
4373 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
4374 } else {
4375 error_report("Image creation needs a size parameter");
4376 ret = -EINVAL;
4377 goto out;
4381 printf("Formatting '%s', fmt=%s ", filename, fmt);
4382 print_option_parameters(param);
4383 puts("");
4385 ret = bdrv_create(drv, filename, param);
4387 if (ret < 0) {
4388 if (ret == -ENOTSUP) {
4389 error_report("Formatting or formatting option not supported for "
4390 "file format '%s'", fmt);
4391 } else if (ret == -EFBIG) {
4392 error_report("The image size is too large for file format '%s'",
4393 fmt);
4394 } else {
4395 error_report("%s: error while creating %s: %s", filename, fmt,
4396 strerror(-ret));
4400 out:
4401 free_option_parameters(create_options);
4402 free_option_parameters(param);
4404 if (bs) {
4405 bdrv_delete(bs);
4408 return ret;
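/* Example invocation (a sketch of what qemu-img create does; the
 * filename and flags are placeholders): create a 1 GiB qcow2 image with
 * no backing file and default options:
 *
 *     ret = bdrv_img_create("disk.qcow2", "qcow2",
 *                           NULL, NULL,       // no backing file/format
 *                           NULL,             // no -o option string
 *                           (uint64_t)1 << 30, 0);
 */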