1 /*
2 * QEMU System Emulator block driver
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "monitor.h"
28 #include "block_int.h"
29 #include "module.h"
30 #include "qemu-objects.h"
32 #ifdef CONFIG_BSD
33 #include <sys/types.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <sys/queue.h>
37 #ifndef __DragonFly__
38 #include <sys/disk.h>
39 #endif
40 #endif
42 #ifdef _WIN32
43 #include <windows.h>
44 #endif
46 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
47 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
48 BlockDriverCompletionFunc *cb, void *opaque);
49 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
50 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
51 BlockDriverCompletionFunc *cb, void *opaque);
52 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
53 BlockDriverCompletionFunc *cb, void *opaque);
54 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
55 BlockDriverCompletionFunc *cb, void *opaque);
56 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
57 uint8_t *buf, int nb_sectors);
58 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
59 const uint8_t *buf, int nb_sectors);
61 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
62 QTAILQ_HEAD_INITIALIZER(bdrv_states);
64 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
65 QLIST_HEAD_INITIALIZER(bdrv_drivers);
67 /* The device to use for VM snapshots */
68 static BlockDriverState *bs_snapshots;
70 /* If non-zero, use only whitelisted block drivers */
71 static int use_bdrv_whitelist;
73 #ifdef _WIN32
74 static int is_windows_drive_prefix(const char *filename)
76 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
77 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
78 filename[1] == ':');
81 int is_windows_drive(const char *filename)
83 if (is_windows_drive_prefix(filename) &&
84 filename[2] == '\0')
85 return 1;
86 if (strstart(filename, "\\\\.\\", NULL) ||
87 strstart(filename, "//./", NULL))
88 return 1;
89 return 0;
91 #endif
93 /* check if the path starts with "<protocol>:" */
94 static int path_has_protocol(const char *path)
96 #ifdef _WIN32
97 if (is_windows_drive(path) ||
98 is_windows_drive_prefix(path)) {
99 return 0;
101 #endif
103 return strchr(path, ':') != NULL;
106 int path_is_absolute(const char *path)
108 const char *p;
109 #ifdef _WIN32
110 /* specific case for names like: "\\.\d:" */
111 if (*path == '/' || *path == '\\')
112 return 1;
113 #endif
114 p = strchr(path, ':');
115 if (p)
116 p++;
117 else
118 p = path;
119 #ifdef _WIN32
120 return (*p == '/' || *p == '\\');
121 #else
122 return (*p == '/');
123 #endif
126 /* if filename is absolute, just copy it to dest. Otherwise, build a
127    path to it by treating it as relative to base_path. URLs are
128    supported. */
129 void path_combine(char *dest, int dest_size,
130 const char *base_path,
131 const char *filename)
133 const char *p, *p1;
134 int len;
136 if (dest_size <= 0)
137 return;
138 if (path_is_absolute(filename)) {
139 pstrcpy(dest, dest_size, filename);
140 } else {
141 p = strchr(base_path, ':');
142 if (p)
143 p++;
144 else
145 p = base_path;
146 p1 = strrchr(base_path, '/');
147 #ifdef _WIN32
149 const char *p2;
150 p2 = strrchr(base_path, '\\');
151 if (!p1 || p2 > p1)
152 p1 = p2;
154 #endif
155 if (p1)
156 p1++;
157 else
158 p1 = base_path;
159 if (p1 > p)
160 p = p1;
161 len = p - base_path;
162 if (len > dest_size - 1)
163 len = dest_size - 1;
164 memcpy(dest, base_path, len);
165 dest[len] = '\0';
166 pstrcat(dest, dest_size, filename);
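/*
 * Usage sketch (illustrative; the paths here are made up): resolving a
 * relative backing file name against the path of the image that refers
 * to it, which is the typical caller of path_combine().
 */
#if 0
static void example_resolve_backing(void)
{
    char resolved[PATH_MAX];

    /* "/images/overlay.qcow2" + "base.raw" -> "/images/base.raw" */
    path_combine(resolved, sizeof(resolved),
                 "/images/overlay.qcow2", "base.raw");

    /* an absolute filename is copied through unchanged */
    path_combine(resolved, sizeof(resolved),
                 "/images/overlay.qcow2", "/mnt/base.raw");
}
#endif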
170 void bdrv_register(BlockDriver *bdrv)
172 if (!bdrv->bdrv_aio_readv) {
173 /* add AIO emulation layer */
174 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
175 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
176 } else if (!bdrv->bdrv_read) {
177 /* add synchronous IO emulation layer */
178 bdrv->bdrv_read = bdrv_read_em;
179 bdrv->bdrv_write = bdrv_write_em;
182 if (!bdrv->bdrv_aio_flush)
183 bdrv->bdrv_aio_flush = bdrv_aio_flush_em;
185 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
188 /* create a new block device (by default it is empty) */
189 BlockDriverState *bdrv_new(const char *device_name)
191 BlockDriverState *bs;
193 bs = qemu_mallocz(sizeof(BlockDriverState));
194 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
195 if (device_name[0] != '\0') {
196 QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
198 return bs;
201 BlockDriver *bdrv_find_format(const char *format_name)
203 BlockDriver *drv1;
204 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
205 if (!strcmp(drv1->format_name, format_name)) {
206 return drv1;
209 return NULL;
212 static int bdrv_is_whitelisted(BlockDriver *drv)
214 static const char *whitelist[] = {
215 CONFIG_BDRV_WHITELIST
217 const char **p;
219 if (!whitelist[0])
220 return 1; /* no whitelist, anything goes */
222 for (p = whitelist; *p; p++) {
223 if (!strcmp(drv->format_name, *p)) {
224 return 1;
227 return 0;
230 BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
232 BlockDriver *drv = bdrv_find_format(format_name);
233 return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
236 int bdrv_create(BlockDriver *drv, const char* filename,
237 QEMUOptionParameter *options)
239 if (!drv->bdrv_create)
240 return -ENOTSUP;
242 return drv->bdrv_create(filename, options);
245 int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
247 BlockDriver *drv;
249 drv = bdrv_find_protocol(filename);
250 if (drv == NULL) {
251 return -ENOENT;
254 return bdrv_create(drv, filename, options);
257 #ifdef _WIN32
258 void get_tmp_filename(char *filename, int size)
260 char temp_dir[MAX_PATH];
262 GetTempPath(MAX_PATH, temp_dir);
263 GetTempFileName(temp_dir, "qem", 0, filename);
265 #else
266 void get_tmp_filename(char *filename, int size)
268 int fd;
269 const char *tmpdir;
270 /* XXX: race condition possible */
271 tmpdir = getenv("TMPDIR");
272 if (!tmpdir)
273 tmpdir = "/tmp";
274 snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);
275 fd = mkstemp(filename);
276 close(fd);
278 #endif
281 * Detect host devices. By convention, /dev/cdrom[N] is always
282 * recognized as a host CDROM.
284 static BlockDriver *find_hdev_driver(const char *filename)
286 int score_max = 0, score;
287 BlockDriver *drv = NULL, *d;
289 QLIST_FOREACH(d, &bdrv_drivers, list) {
290 if (d->bdrv_probe_device) {
291 score = d->bdrv_probe_device(filename);
292 if (score > score_max) {
293 score_max = score;
294 drv = d;
299 return drv;
302 BlockDriver *bdrv_find_protocol(const char *filename)
304 BlockDriver *drv1;
305 char protocol[128];
306 int len;
307 const char *p;
309 /* TODO Drivers without bdrv_file_open must be specified explicitly */
312 * XXX(hch): we really should not let host device detection
313 * override an explicit protocol specification, but moving this
314 * later breaks access to device names with colons in them.
315 * Thanks to the brain-dead persistent naming schemes on udev-
316      * based Linux systems, those actually are quite common.
318 drv1 = find_hdev_driver(filename);
319 if (drv1) {
320 return drv1;
323 if (!path_has_protocol(filename)) {
324 return bdrv_find_format("file");
326 p = strchr(filename, ':');
327 assert(p != NULL);
328 len = p - filename;
329 if (len > sizeof(protocol) - 1)
330 len = sizeof(protocol) - 1;
331 memcpy(protocol, filename, len);
332 protocol[len] = '\0';
333 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
334 if (drv1->protocol_name &&
335 !strcmp(drv1->protocol_name, protocol)) {
336 return drv1;
339 return NULL;
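/*
 * Usage sketch (illustrative; the filenames are made up and the concrete
 * drivers returned depend on what was registered at build time): what
 * bdrv_find_protocol() is expected to pick for a few typical names.
 */
#if 0
static void example_find_protocol(void)
{
    /* no "<protocol>:" prefix -> the plain "file" protocol driver */
    BlockDriver *d1 = bdrv_find_protocol("/var/lib/images/disk.img");

    /* "nbd:..." -> the driver whose protocol_name is "nbd", if available */
    BlockDriver *d2 = bdrv_find_protocol("nbd:localhost:10809");

    (void)d1;
    (void)d2;
}
#endif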
342 static int find_image_format(const char *filename, BlockDriver **pdrv)
344 int ret, score, score_max;
345 BlockDriver *drv1, *drv;
346 uint8_t buf[2048];
347 BlockDriverState *bs;
349 ret = bdrv_file_open(&bs, filename, 0);
350 if (ret < 0) {
351 *pdrv = NULL;
352 return ret;
355     /* Return the raw BlockDriver * for scsi-generic devices or empty drives */
356 if (bs->sg || !bdrv_is_inserted(bs)) {
357 bdrv_delete(bs);
358 drv = bdrv_find_format("raw");
359 if (!drv) {
360 ret = -ENOENT;
362 *pdrv = drv;
363 return ret;
366 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
367 bdrv_delete(bs);
368 if (ret < 0) {
369 *pdrv = NULL;
370 return ret;
373 score_max = 0;
374 drv = NULL;
375 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
376 if (drv1->bdrv_probe) {
377 score = drv1->bdrv_probe(buf, ret, filename);
378 if (score > score_max) {
379 score_max = score;
380 drv = drv1;
384 if (!drv) {
385 ret = -ENOENT;
387 *pdrv = drv;
388 return ret;
392 * Set the current 'total_sectors' value
394 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
396 BlockDriver *drv = bs->drv;
398 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
399 if (bs->sg)
400 return 0;
402 /* query actual device if possible, otherwise just trust the hint */
403 if (drv->bdrv_getlength) {
404 int64_t length = drv->bdrv_getlength(bs);
405 if (length < 0) {
406 return length;
408 hint = length >> BDRV_SECTOR_BITS;
411 bs->total_sectors = hint;
412 return 0;
416 * Common part for opening disk images and files
418 static int bdrv_open_common(BlockDriverState *bs, const char *filename,
419 int flags, BlockDriver *drv)
421 int ret, open_flags;
423 assert(drv != NULL);
425 bs->file = NULL;
426 bs->total_sectors = 0;
427 bs->encrypted = 0;
428 bs->valid_key = 0;
429 bs->open_flags = flags;
430     /* buffer_alignment defaults to 512, drivers can change this value */
431 bs->buffer_alignment = 512;
433 pstrcpy(bs->filename, sizeof(bs->filename), filename);
435 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
436 return -ENOTSUP;
439 bs->drv = drv;
440 bs->opaque = qemu_mallocz(drv->instance_size);
442 if (flags & BDRV_O_CACHE_WB)
443 bs->enable_write_cache = 1;
446 * Clear flags that are internal to the block layer before opening the
447 * image.
449 open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
452 * Snapshots should be writable.
454 if (bs->is_temporary) {
455 open_flags |= BDRV_O_RDWR;
458 /* Open the image, either directly or using a protocol */
459 if (drv->bdrv_file_open) {
460 ret = drv->bdrv_file_open(bs, filename, open_flags);
461 } else {
462 ret = bdrv_file_open(&bs->file, filename, open_flags);
463 if (ret >= 0) {
464 ret = drv->bdrv_open(bs, open_flags);
468 if (ret < 0) {
469 goto free_and_fail;
472 bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
474 ret = refresh_total_sectors(bs, bs->total_sectors);
475 if (ret < 0) {
476 goto free_and_fail;
479 #ifndef _WIN32
480 if (bs->is_temporary) {
481 unlink(filename);
483 #endif
484 return 0;
486 free_and_fail:
487 if (bs->file) {
488 bdrv_delete(bs->file);
489 bs->file = NULL;
491 qemu_free(bs->opaque);
492 bs->opaque = NULL;
493 bs->drv = NULL;
494 return ret;
498 * Opens a file using a protocol (file, host_device, nbd, ...)
500 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
502 BlockDriverState *bs;
503 BlockDriver *drv;
504 int ret;
506 drv = bdrv_find_protocol(filename);
507 if (!drv) {
508 return -ENOENT;
511 bs = bdrv_new("");
512 ret = bdrv_open_common(bs, filename, flags, drv);
513 if (ret < 0) {
514 bdrv_delete(bs);
515 return ret;
517 bs->growable = 1;
518 *pbs = bs;
519 return 0;
523 * Opens a disk image (raw, qcow2, vmdk, ...)
525 int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
526 BlockDriver *drv)
528 int ret;
530 if (flags & BDRV_O_SNAPSHOT) {
531 BlockDriverState *bs1;
532 int64_t total_size;
533 int is_protocol = 0;
534 BlockDriver *bdrv_qcow2;
535 QEMUOptionParameter *options;
536 char tmp_filename[PATH_MAX];
537 char backing_filename[PATH_MAX];
539 /* if snapshot, we create a temporary backing file and open it
540 instead of opening 'filename' directly */
542 /* if there is a backing file, use it */
543 bs1 = bdrv_new("");
544 ret = bdrv_open(bs1, filename, 0, drv);
545 if (ret < 0) {
546 bdrv_delete(bs1);
547 return ret;
549 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
551 if (bs1->drv && bs1->drv->protocol_name)
552 is_protocol = 1;
554 bdrv_delete(bs1);
556 get_tmp_filename(tmp_filename, sizeof(tmp_filename));
558 /* Real path is meaningless for protocols */
559 if (is_protocol)
560 snprintf(backing_filename, sizeof(backing_filename),
561 "%s", filename);
562 else if (!realpath(filename, backing_filename))
563 return -errno;
565 bdrv_qcow2 = bdrv_find_format("qcow2");
566 options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
568 set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
569 set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
570 if (drv) {
571 set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
572 drv->format_name);
575 ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
576 free_option_parameters(options);
577 if (ret < 0) {
578 return ret;
581 filename = tmp_filename;
582 drv = bdrv_qcow2;
583 bs->is_temporary = 1;
586 /* Find the right image format driver */
587 if (!drv) {
588 ret = find_image_format(filename, &drv);
591 if (!drv) {
592 goto unlink_and_fail;
595 /* Open the image */
596 ret = bdrv_open_common(bs, filename, flags, drv);
597 if (ret < 0) {
598 goto unlink_and_fail;
601 /* If there is a backing file, use it */
602 if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
603 char backing_filename[PATH_MAX];
604 int back_flags;
605 BlockDriver *back_drv = NULL;
607 bs->backing_hd = bdrv_new("");
609 if (path_has_protocol(bs->backing_file)) {
610 pstrcpy(backing_filename, sizeof(backing_filename),
611 bs->backing_file);
612 } else {
613 path_combine(backing_filename, sizeof(backing_filename),
614 filename, bs->backing_file);
617 if (bs->backing_format[0] != '\0') {
618 back_drv = bdrv_find_format(bs->backing_format);
621         /* backing files are always opened read-only */
622 back_flags =
623 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
625 ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
626 if (ret < 0) {
627 bdrv_close(bs);
628 return ret;
630 if (bs->is_temporary) {
631 bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
632 } else {
633 /* base image inherits from "parent" */
634 bs->backing_hd->keep_read_only = bs->keep_read_only;
638 if (!bdrv_key_required(bs)) {
639 /* call the change callback */
640 bs->media_changed = 1;
641 if (bs->change_cb)
642 bs->change_cb(bs->change_opaque, CHANGE_MEDIA);
645 return 0;
647 unlink_and_fail:
648 if (bs->is_temporary) {
649 unlink(filename);
651 return ret;
654 void bdrv_close(BlockDriverState *bs)
656 if (bs->drv) {
657 if (bs == bs_snapshots) {
658 bs_snapshots = NULL;
660 if (bs->backing_hd) {
661 bdrv_delete(bs->backing_hd);
662 bs->backing_hd = NULL;
664 bs->drv->bdrv_close(bs);
665 qemu_free(bs->opaque);
666 #ifdef _WIN32
667 if (bs->is_temporary) {
668 unlink(bs->filename);
670 #endif
671 bs->opaque = NULL;
672 bs->drv = NULL;
674 if (bs->file != NULL) {
675 bdrv_close(bs->file);
678 /* call the change callback */
679 bs->media_changed = 1;
680 if (bs->change_cb)
681 bs->change_cb(bs->change_opaque, CHANGE_MEDIA);
685 void bdrv_close_all(void)
687 BlockDriverState *bs;
689 QTAILQ_FOREACH(bs, &bdrv_states, list) {
690 bdrv_close(bs);
694 /* make a BlockDriverState anonymous by removing it from the bdrv_states
695    list. Also, NUL-terminate the device_name to prevent a double remove */
696 void bdrv_make_anon(BlockDriverState *bs)
698 if (bs->device_name[0] != '\0') {
699 QTAILQ_REMOVE(&bdrv_states, bs, list);
701 bs->device_name[0] = '\0';
704 void bdrv_delete(BlockDriverState *bs)
706 assert(!bs->peer);
708 /* remove from list, if necessary */
709 bdrv_make_anon(bs);
711 bdrv_close(bs);
712 if (bs->file != NULL) {
713 bdrv_delete(bs->file);
716 assert(bs != bs_snapshots);
717 qemu_free(bs);
720 int bdrv_attach(BlockDriverState *bs, DeviceState *qdev)
722 if (bs->peer) {
723 return -EBUSY;
725 bs->peer = qdev;
726 return 0;
729 void bdrv_detach(BlockDriverState *bs, DeviceState *qdev)
731 assert(bs->peer == qdev);
732 bs->peer = NULL;
733 bs->change_cb = NULL;
734 bs->change_opaque = NULL;
737 DeviceState *bdrv_get_attached(BlockDriverState *bs)
739 return bs->peer;
743 * Run consistency checks on an image
745 * Returns 0 if the check could be completed (it doesn't mean that the image is
746 * free of errors) or -errno when an internal error occurred. The results of the
747 * check are stored in res.
749 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
751 if (bs->drv->bdrv_check == NULL) {
752 return -ENOTSUP;
755 memset(res, 0, sizeof(*res));
756 return bs->drv->bdrv_check(bs, res);
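/*
 * Usage sketch (illustrative): a caller of bdrv_check(). A return value of
 * 0 only means the check itself ran to completion; the counters collected
 * in 'res' still have to be inspected for actual corruption.
 */
#if 0
static int example_check(BlockDriverState *bs)
{
    BdrvCheckResult res;
    int ret = bdrv_check(bs, &res);

    if (ret == -ENOTSUP) {
        /* the format driver has no consistency check */
        return 0;
    }
    return ret;
}
#endif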
759 #define COMMIT_BUF_SECTORS 2048
761 /* commit COW file into the raw image */
762 int bdrv_commit(BlockDriverState *bs)
764 BlockDriver *drv = bs->drv;
765 BlockDriver *backing_drv;
766 int64_t sector, total_sectors;
767 int n, ro, open_flags;
768 int ret = 0, rw_ret = 0;
769 uint8_t *buf;
770 char filename[1024];
771 BlockDriverState *bs_rw, *bs_ro;
773 if (!drv)
774 return -ENOMEDIUM;
776 if (!bs->backing_hd) {
777 return -ENOTSUP;
780 if (bs->backing_hd->keep_read_only) {
781 return -EACCES;
784 backing_drv = bs->backing_hd->drv;
785 ro = bs->backing_hd->read_only;
786 strncpy(filename, bs->backing_hd->filename, sizeof(filename));
787 open_flags = bs->backing_hd->open_flags;
789 if (ro) {
790 /* re-open as RW */
791 bdrv_delete(bs->backing_hd);
792 bs->backing_hd = NULL;
793 bs_rw = bdrv_new("");
794 rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
795 backing_drv);
796 if (rw_ret < 0) {
797 bdrv_delete(bs_rw);
798 /* try to re-open read-only */
799 bs_ro = bdrv_new("");
800 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
801 backing_drv);
802 if (ret < 0) {
803 bdrv_delete(bs_ro);
804 /* drive not functional anymore */
805 bs->drv = NULL;
806 return ret;
808 bs->backing_hd = bs_ro;
809 return rw_ret;
811 bs->backing_hd = bs_rw;
814 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
815 buf = qemu_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
817 for (sector = 0; sector < total_sectors; sector += n) {
818 if (drv->bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
820 if (bdrv_read(bs, sector, buf, n) != 0) {
821 ret = -EIO;
822 goto ro_cleanup;
825 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
826 ret = -EIO;
827 goto ro_cleanup;
832 if (drv->bdrv_make_empty) {
833 ret = drv->bdrv_make_empty(bs);
834 bdrv_flush(bs);
838 * Make sure all data we wrote to the backing device is actually
839 * stable on disk.
841 if (bs->backing_hd)
842 bdrv_flush(bs->backing_hd);
844 ro_cleanup:
845 qemu_free(buf);
847 if (ro) {
848 /* re-open as RO */
849 bdrv_delete(bs->backing_hd);
850 bs->backing_hd = NULL;
851 bs_ro = bdrv_new("");
852 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
853 backing_drv);
854 if (ret < 0) {
855 bdrv_delete(bs_ro);
856 /* drive not functional anymore */
857 bs->drv = NULL;
858 return ret;
860 bs->backing_hd = bs_ro;
861 bs->backing_hd->keep_read_only = 0;
864 return ret;
867 void bdrv_commit_all(void)
869 BlockDriverState *bs;
871 QTAILQ_FOREACH(bs, &bdrv_states, list) {
872 bdrv_commit(bs);
877 * Return values:
878 * 0 - success
879 * -EINVAL - backing format specified, but no file
880 * -ENOSPC - can't update the backing file because no space is left in the
881 * image file header
882 * -ENOTSUP - format driver doesn't support changing the backing file
884 int bdrv_change_backing_file(BlockDriverState *bs,
885 const char *backing_file, const char *backing_fmt)
887 BlockDriver *drv = bs->drv;
889 if (drv->bdrv_change_backing_file != NULL) {
890 return drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
891 } else {
892 return -ENOTSUP;
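/*
 * Usage sketch (illustrative; the file name and format are made up):
 * rewriting the backing file reference stored in an image header, e.g.
 * after the image has been rebased onto a new base file.
 */
#if 0
static int example_rebase(BlockDriverState *bs)
{
    return bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");
}
#endif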
896 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
897 size_t size)
899 int64_t len;
901 if (!bdrv_is_inserted(bs))
902 return -ENOMEDIUM;
904 if (bs->growable)
905 return 0;
907 len = bdrv_getlength(bs);
909 if (offset < 0)
910 return -EIO;
912 if ((offset > len) || (len - offset < size))
913 return -EIO;
915 return 0;
918 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
919 int nb_sectors)
921 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
922 nb_sectors * BDRV_SECTOR_SIZE);
925 /* return < 0 if error. See bdrv_write() for the return codes */
926 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
927 uint8_t *buf, int nb_sectors)
929 BlockDriver *drv = bs->drv;
931 if (!drv)
932 return -ENOMEDIUM;
933 if (bdrv_check_request(bs, sector_num, nb_sectors))
934 return -EIO;
936 return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
939 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
940 int nb_sectors, int dirty)
942 int64_t start, end;
943 unsigned long val, idx, bit;
945 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
946 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
948 for (; start <= end; start++) {
949 idx = start / (sizeof(unsigned long) * 8);
950 bit = start % (sizeof(unsigned long) * 8);
951 val = bs->dirty_bitmap[idx];
952 if (dirty) {
953 if (!(val & (1UL << bit))) {
954 bs->dirty_count++;
955 val |= 1UL << bit;
957 } else {
958 if (val & (1UL << bit)) {
959 bs->dirty_count--;
960 val &= ~(1UL << bit);
963 bs->dirty_bitmap[idx] = val;
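/*
 * Worked example (illustrative): a write touching sector S dirties chunk
 * S / BDRV_SECTORS_PER_DIRTY_CHUNK; that chunk is tracked in word
 * chunk / (sizeof(unsigned long) * 8) of dirty_bitmap, at bit
 * chunk % (sizeof(unsigned long) * 8). dirty_count counts the chunks whose
 * bit flips from clear to set (and back).
 */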
967 /* Return < 0 if error. Important errors are:
968 -EIO generic I/O error (may happen for all errors)
969 -ENOMEDIUM No media inserted.
970 -EINVAL Invalid sector number or nb_sectors
971    -EACCES      Trying to write to a read-only device
973 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
974 const uint8_t *buf, int nb_sectors)
976 BlockDriver *drv = bs->drv;
977 if (!bs->drv)
978 return -ENOMEDIUM;
979 if (bs->read_only)
980 return -EACCES;
981 if (bdrv_check_request(bs, sector_num, nb_sectors))
982 return -EIO;
984 if (bs->dirty_bitmap) {
985 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
988 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
989 bs->wr_highest_sector = sector_num + nb_sectors - 1;
992 return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
995 int bdrv_pread(BlockDriverState *bs, int64_t offset,
996 void *buf, int count1)
998 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
999 int len, nb_sectors, count;
1000 int64_t sector_num;
1001 int ret;
1003 count = count1;
1004 /* first read to align to sector start */
1005 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1006 if (len > count)
1007 len = count;
1008 sector_num = offset >> BDRV_SECTOR_BITS;
1009 if (len > 0) {
1010 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1011 return ret;
1012 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
1013 count -= len;
1014 if (count == 0)
1015 return count1;
1016 sector_num++;
1017 buf += len;
1020 /* read the sectors "in place" */
1021 nb_sectors = count >> BDRV_SECTOR_BITS;
1022 if (nb_sectors > 0) {
1023 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1024 return ret;
1025 sector_num += nb_sectors;
1026 len = nb_sectors << BDRV_SECTOR_BITS;
1027 buf += len;
1028 count -= len;
1031 /* add data from the last sector */
1032 if (count > 0) {
1033 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1034 return ret;
1035 memcpy(buf, tmp_buf, count);
1037 return count1;
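/*
 * Usage sketch (illustrative; offset and size are made up): because
 * bdrv_pread() performs the head/tail alignment above, a caller can read a
 * byte-aligned structure without worrying about sector boundaries.
 */
#if 0
static int example_read_unaligned(BlockDriverState *bs)
{
    uint8_t buf[100];

    /* bytes [500, 600): crosses a sector boundary, handled internally */
    return bdrv_pread(bs, 500, buf, sizeof(buf));
}
#endif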
1040 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1041 const void *buf, int count1)
1043 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1044 int len, nb_sectors, count;
1045 int64_t sector_num;
1046 int ret;
1048 count = count1;
1049 /* first write to align to sector start */
1050 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1051 if (len > count)
1052 len = count;
1053 sector_num = offset >> BDRV_SECTOR_BITS;
1054 if (len > 0) {
1055 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1056 return ret;
1057 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
1058 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1059 return ret;
1060 count -= len;
1061 if (count == 0)
1062 return count1;
1063 sector_num++;
1064 buf += len;
1067 /* write the sectors "in place" */
1068 nb_sectors = count >> BDRV_SECTOR_BITS;
1069 if (nb_sectors > 0) {
1070 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1071 return ret;
1072 sector_num += nb_sectors;
1073 len = nb_sectors << BDRV_SECTOR_BITS;
1074 buf += len;
1075 count -= len;
1078 /* add data from the last sector */
1079 if (count > 0) {
1080 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1081 return ret;
1082 memcpy(tmp_buf, buf, count);
1083 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1084 return ret;
1086 return count1;
1090 * Writes to the file and ensures that no writes are reordered across this
1091 * request (acts as a barrier)
1093 * Returns 0 on success, -errno in error cases.
1095 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1096 const void *buf, int count)
1098 int ret;
1100 ret = bdrv_pwrite(bs, offset, buf, count);
1101 if (ret < 0) {
1102 return ret;
1105 /* No flush needed for cache=writethrough, it uses O_DSYNC */
1106 if ((bs->open_flags & BDRV_O_CACHE_MASK) != 0) {
1107 bdrv_flush(bs);
1110 return 0;
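/*
 * Usage sketch (illustrative; the offset and field are made up): updating a
 * small piece of image metadata with bdrv_pwrite_sync() so that it is
 * stable on disk before any data that depends on it is written.
 */
#if 0
static int example_update_metadata(BlockDriverState *bs, uint32_t refcount)
{
    uint32_t val = cpu_to_be32(refcount);

    /* write + flush (unless cache=writethrough already guarantees it) */
    return bdrv_pwrite_sync(bs, 0x1000, &val, sizeof(val));
}
#endif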
1114 * Writes to the file and ensures that no writes are reordered across this
1115 * request (acts as a barrier)
1117 * Returns 0 on success, -errno in error cases.
1119 int bdrv_write_sync(BlockDriverState *bs, int64_t sector_num,
1120 const uint8_t *buf, int nb_sectors)
1122 return bdrv_pwrite_sync(bs, BDRV_SECTOR_SIZE * sector_num,
1123 buf, BDRV_SECTOR_SIZE * nb_sectors);
1127 * Truncate file to 'offset' bytes (needed only for file protocols)
1129 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
1131 BlockDriver *drv = bs->drv;
1132 int ret;
1133 if (!drv)
1134 return -ENOMEDIUM;
1135 if (!drv->bdrv_truncate)
1136 return -ENOTSUP;
1137 if (bs->read_only)
1138 return -EACCES;
1139 if (bdrv_in_use(bs))
1140 return -EBUSY;
1141 ret = drv->bdrv_truncate(bs, offset);
1142 if (ret == 0) {
1143 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
1144 if (bs->change_cb) {
1145 bs->change_cb(bs->change_opaque, CHANGE_SIZE);
1148 return ret;
1152  * Length of an allocated file in bytes. Sparse files are counted by actual
1153 * allocated space. Return < 0 if error or unknown.
1155 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
1157 BlockDriver *drv = bs->drv;
1158 if (!drv) {
1159 return -ENOMEDIUM;
1161 if (drv->bdrv_get_allocated_file_size) {
1162 return drv->bdrv_get_allocated_file_size(bs);
1164 if (bs->file) {
1165 return bdrv_get_allocated_file_size(bs->file);
1167 return -ENOTSUP;
1171 * Length of a file in bytes. Return < 0 if error or unknown.
1173 int64_t bdrv_getlength(BlockDriverState *bs)
1175 BlockDriver *drv = bs->drv;
1176 if (!drv)
1177 return -ENOMEDIUM;
1179 if (bs->growable || bs->removable) {
1180 if (drv->bdrv_getlength) {
1181 return drv->bdrv_getlength(bs);
1184 return bs->total_sectors * BDRV_SECTOR_SIZE;
1187 /* return 0 as the number of sectors if no device is present or on error */
1188 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
1190 int64_t length;
1191 length = bdrv_getlength(bs);
1192 if (length < 0)
1193 length = 0;
1194 else
1195 length = length >> BDRV_SECTOR_BITS;
1196 *nb_sectors_ptr = length;
1199 struct partition {
1200 uint8_t boot_ind; /* 0x80 - active */
1201 uint8_t head; /* starting head */
1202 uint8_t sector; /* starting sector */
1203 uint8_t cyl; /* starting cylinder */
1204 uint8_t sys_ind; /* What partition type */
1205 uint8_t end_head; /* end head */
1206 uint8_t end_sector; /* end sector */
1207 uint8_t end_cyl; /* end cylinder */
1208 uint32_t start_sect; /* starting sector counting from 0 */
1209 uint32_t nr_sects; /* nr of sectors in partition */
1210 } __attribute__((packed));
1212 /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if it could not be guessed */
1213 static int guess_disk_lchs(BlockDriverState *bs,
1214 int *pcylinders, int *pheads, int *psectors)
1216 uint8_t buf[BDRV_SECTOR_SIZE];
1217 int ret, i, heads, sectors, cylinders;
1218 struct partition *p;
1219 uint32_t nr_sects;
1220 uint64_t nb_sectors;
1222 bdrv_get_geometry(bs, &nb_sectors);
1224 ret = bdrv_read(bs, 0, buf, 1);
1225 if (ret < 0)
1226 return -1;
1227 /* test msdos magic */
1228 if (buf[510] != 0x55 || buf[511] != 0xaa)
1229 return -1;
1230 for(i = 0; i < 4; i++) {
1231 p = ((struct partition *)(buf + 0x1be)) + i;
1232 nr_sects = le32_to_cpu(p->nr_sects);
1233 if (nr_sects && p->end_head) {
1234 /* We make the assumption that the partition terminates on
1235 a cylinder boundary */
1236 heads = p->end_head + 1;
1237 sectors = p->end_sector & 63;
1238 if (sectors == 0)
1239 continue;
1240 cylinders = nb_sectors / (heads * sectors);
1241 if (cylinders < 1 || cylinders > 16383)
1242 continue;
1243 *pheads = heads;
1244 *psectors = sectors;
1245 *pcylinders = cylinders;
1246 #if 0
1247 printf("guessed geometry: LCHS=%d %d %d\n",
1248 cylinders, heads, sectors);
1249 #endif
1250 return 0;
1253 return -1;
1256 void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
1258 int translation, lba_detected = 0;
1259 int cylinders, heads, secs;
1260 uint64_t nb_sectors;
1262 /* if a geometry hint is available, use it */
1263 bdrv_get_geometry(bs, &nb_sectors);
1264 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
1265 translation = bdrv_get_translation_hint(bs);
1266 if (cylinders != 0) {
1267 *pcyls = cylinders;
1268 *pheads = heads;
1269 *psecs = secs;
1270 } else {
1271 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
1272 if (heads > 16) {
1273 /* if heads > 16, it means that a BIOS LBA
1274 translation was active, so the default
1275 hardware geometry is OK */
1276 lba_detected = 1;
1277 goto default_geometry;
1278 } else {
1279 *pcyls = cylinders;
1280 *pheads = heads;
1281 *psecs = secs;
1282 /* disable any translation to be in sync with
1283 the logical geometry */
1284 if (translation == BIOS_ATA_TRANSLATION_AUTO) {
1285 bdrv_set_translation_hint(bs,
1286 BIOS_ATA_TRANSLATION_NONE);
1289 } else {
1290 default_geometry:
1291 /* if no geometry, use a standard physical disk geometry */
1292 cylinders = nb_sectors / (16 * 63);
1294 if (cylinders > 16383)
1295 cylinders = 16383;
1296 else if (cylinders < 2)
1297 cylinders = 2;
1298 *pcyls = cylinders;
1299 *pheads = 16;
1300 *psecs = 63;
1301 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
1302 if ((*pcyls * *pheads) <= 131072) {
1303 bdrv_set_translation_hint(bs,
1304 BIOS_ATA_TRANSLATION_LARGE);
1305 } else {
1306 bdrv_set_translation_hint(bs,
1307 BIOS_ATA_TRANSLATION_LBA);
1311 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
1315 void bdrv_set_geometry_hint(BlockDriverState *bs,
1316 int cyls, int heads, int secs)
1318 bs->cyls = cyls;
1319 bs->heads = heads;
1320 bs->secs = secs;
1323 void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
1325 bs->translation = translation;
1328 void bdrv_get_geometry_hint(BlockDriverState *bs,
1329 int *pcyls, int *pheads, int *psecs)
1331 *pcyls = bs->cyls;
1332 *pheads = bs->heads;
1333 *psecs = bs->secs;
1336 /* Recognize floppy formats */
1337 typedef struct FDFormat {
1338 FDriveType drive;
1339 uint8_t last_sect;
1340 uint8_t max_track;
1341 uint8_t max_head;
1342 } FDFormat;
1344 static const FDFormat fd_formats[] = {
1345 /* First entry is default format */
1346 /* 1.44 MB 3"1/2 floppy disks */
1347 { FDRIVE_DRV_144, 18, 80, 1, },
1348 { FDRIVE_DRV_144, 20, 80, 1, },
1349 { FDRIVE_DRV_144, 21, 80, 1, },
1350 { FDRIVE_DRV_144, 21, 82, 1, },
1351 { FDRIVE_DRV_144, 21, 83, 1, },
1352 { FDRIVE_DRV_144, 22, 80, 1, },
1353 { FDRIVE_DRV_144, 23, 80, 1, },
1354 { FDRIVE_DRV_144, 24, 80, 1, },
1355 /* 2.88 MB 3"1/2 floppy disks */
1356 { FDRIVE_DRV_288, 36, 80, 1, },
1357 { FDRIVE_DRV_288, 39, 80, 1, },
1358 { FDRIVE_DRV_288, 40, 80, 1, },
1359 { FDRIVE_DRV_288, 44, 80, 1, },
1360 { FDRIVE_DRV_288, 48, 80, 1, },
1361 /* 720 kB 3"1/2 floppy disks */
1362 { FDRIVE_DRV_144, 9, 80, 1, },
1363 { FDRIVE_DRV_144, 10, 80, 1, },
1364 { FDRIVE_DRV_144, 10, 82, 1, },
1365 { FDRIVE_DRV_144, 10, 83, 1, },
1366 { FDRIVE_DRV_144, 13, 80, 1, },
1367 { FDRIVE_DRV_144, 14, 80, 1, },
1368 /* 1.2 MB 5"1/4 floppy disks */
1369 { FDRIVE_DRV_120, 15, 80, 1, },
1370 { FDRIVE_DRV_120, 18, 80, 1, },
1371 { FDRIVE_DRV_120, 18, 82, 1, },
1372 { FDRIVE_DRV_120, 18, 83, 1, },
1373 { FDRIVE_DRV_120, 20, 80, 1, },
1374 /* 720 kB 5"1/4 floppy disks */
1375 { FDRIVE_DRV_120, 9, 80, 1, },
1376 { FDRIVE_DRV_120, 11, 80, 1, },
1377 /* 360 kB 5"1/4 floppy disks */
1378 { FDRIVE_DRV_120, 9, 40, 1, },
1379 { FDRIVE_DRV_120, 9, 40, 0, },
1380 { FDRIVE_DRV_120, 10, 41, 1, },
1381 { FDRIVE_DRV_120, 10, 42, 1, },
1382 /* 320 kB 5"1/4 floppy disks */
1383 { FDRIVE_DRV_120, 8, 40, 1, },
1384 { FDRIVE_DRV_120, 8, 40, 0, },
1385 /* 360 kB must match 5"1/4 better than 3"1/2... */
1386 { FDRIVE_DRV_144, 9, 80, 0, },
1387 /* end */
1388 { FDRIVE_DRV_NONE, -1, -1, 0, },
1391 void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
1392 int *max_track, int *last_sect,
1393 FDriveType drive_in, FDriveType *drive)
1395 const FDFormat *parse;
1396 uint64_t nb_sectors, size;
1397 int i, first_match, match;
1399 bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
1400 if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
1401 /* User defined disk */
1402 } else {
1403 bdrv_get_geometry(bs, &nb_sectors);
1404 match = -1;
1405 first_match = -1;
1406 for (i = 0; ; i++) {
1407 parse = &fd_formats[i];
1408 if (parse->drive == FDRIVE_DRV_NONE) {
1409 break;
1411 if (drive_in == parse->drive ||
1412 drive_in == FDRIVE_DRV_NONE) {
1413 size = (parse->max_head + 1) * parse->max_track *
1414 parse->last_sect;
1415 if (nb_sectors == size) {
1416 match = i;
1417 break;
1419 if (first_match == -1) {
1420 first_match = i;
1424 if (match == -1) {
1425 if (first_match == -1) {
1426 match = 1;
1427 } else {
1428 match = first_match;
1430 parse = &fd_formats[match];
1432 *nb_heads = parse->max_head + 1;
1433 *max_track = parse->max_track;
1434 *last_sect = parse->last_sect;
1435 *drive = parse->drive;
1439 int bdrv_get_translation_hint(BlockDriverState *bs)
1441 return bs->translation;
1444 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
1445 BlockErrorAction on_write_error)
1447 bs->on_read_error = on_read_error;
1448 bs->on_write_error = on_write_error;
1451 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
1453 return is_read ? bs->on_read_error : bs->on_write_error;
1456 void bdrv_set_removable(BlockDriverState *bs, int removable)
1458 bs->removable = removable;
1459 if (removable && bs == bs_snapshots) {
1460 bs_snapshots = NULL;
1464 int bdrv_is_removable(BlockDriverState *bs)
1466 return bs->removable;
1469 int bdrv_is_read_only(BlockDriverState *bs)
1471 return bs->read_only;
1474 int bdrv_is_sg(BlockDriverState *bs)
1476 return bs->sg;
1479 int bdrv_enable_write_cache(BlockDriverState *bs)
1481 return bs->enable_write_cache;
1484 /* XXX: no longer used */
1485 void bdrv_set_change_cb(BlockDriverState *bs,
1486 void (*change_cb)(void *opaque, int reason),
1487 void *opaque)
1489 bs->change_cb = change_cb;
1490 bs->change_opaque = opaque;
1493 int bdrv_is_encrypted(BlockDriverState *bs)
1495 if (bs->backing_hd && bs->backing_hd->encrypted)
1496 return 1;
1497 return bs->encrypted;
1500 int bdrv_key_required(BlockDriverState *bs)
1502 BlockDriverState *backing_hd = bs->backing_hd;
1504 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
1505 return 1;
1506 return (bs->encrypted && !bs->valid_key);
1509 int bdrv_set_key(BlockDriverState *bs, const char *key)
1511 int ret;
1512 if (bs->backing_hd && bs->backing_hd->encrypted) {
1513 ret = bdrv_set_key(bs->backing_hd, key);
1514 if (ret < 0)
1515 return ret;
1516 if (!bs->encrypted)
1517 return 0;
1519 if (!bs->encrypted) {
1520 return -EINVAL;
1521 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
1522 return -ENOMEDIUM;
1524 ret = bs->drv->bdrv_set_key(bs, key);
1525 if (ret < 0) {
1526 bs->valid_key = 0;
1527 } else if (!bs->valid_key) {
1528 bs->valid_key = 1;
1529 /* call the change callback now, we skipped it on open */
1530 bs->media_changed = 1;
1531 if (bs->change_cb)
1532 bs->change_cb(bs->change_opaque, CHANGE_MEDIA);
1534 return ret;
1537 void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size)
1539 if (!bs->drv) {
1540 buf[0] = '\0';
1541 } else {
1542 pstrcpy(buf, buf_size, bs->drv->format_name);
1546 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
1547 void *opaque)
1549 BlockDriver *drv;
1551 QLIST_FOREACH(drv, &bdrv_drivers, list) {
1552 it(opaque, drv->format_name);
1556 BlockDriverState *bdrv_find(const char *name)
1558 BlockDriverState *bs;
1560 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1561 if (!strcmp(name, bs->device_name)) {
1562 return bs;
1565 return NULL;
1568 BlockDriverState *bdrv_next(BlockDriverState *bs)
1570 if (!bs) {
1571 return QTAILQ_FIRST(&bdrv_states);
1573 return QTAILQ_NEXT(bs, list);
1576 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
1578 BlockDriverState *bs;
1580 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1581 it(opaque, bs);
1585 const char *bdrv_get_device_name(BlockDriverState *bs)
1587 return bs->device_name;
1590 int bdrv_flush(BlockDriverState *bs)
1592 if (bs->open_flags & BDRV_O_NO_FLUSH) {
1593 return 0;
1596 if (bs->drv && bs->drv->bdrv_flush) {
1597 return bs->drv->bdrv_flush(bs);
1601 * Some block drivers always operate in either writethrough or unsafe mode
1602      * and therefore don't support bdrv_flush. Usually qemu doesn't know how
1603 * the server works (because the behaviour is hardcoded or depends on
1604 * server-side configuration), so we can't ensure that everything is safe
1605 * on disk. Returning an error doesn't work because that would break guests
1606 * even if the server operates in writethrough mode.
1608 * Let's hope the user knows what he's doing.
1610 return 0;
1613 void bdrv_flush_all(void)
1615 BlockDriverState *bs;
1617 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1618 if (bs->drv && !bdrv_is_read_only(bs) &&
1619 (!bdrv_is_removable(bs) || bdrv_is_inserted(bs))) {
1620 bdrv_flush(bs);
1625 int bdrv_has_zero_init(BlockDriverState *bs)
1627 assert(bs->drv);
1629 if (bs->drv->bdrv_has_zero_init) {
1630 return bs->drv->bdrv_has_zero_init(bs);
1633 return 1;
1636 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
1638 if (!bs->drv) {
1639 return -ENOMEDIUM;
1641 if (!bs->drv->bdrv_discard) {
1642 return 0;
1644 return bs->drv->bdrv_discard(bs, sector_num, nb_sectors);
1648 * Returns true iff the specified sector is present in the disk image. Drivers
1649 * not implementing the functionality are assumed to not support backing files,
1650 * hence all their sectors are reported as allocated.
1652 * 'pnum' is set to the number of sectors (including and immediately following
1653 * the specified sector) that are known to be in the same
1654 * allocated/unallocated state.
1656 * 'nb_sectors' is the max value 'pnum' should be set to.
1658 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
1659 int *pnum)
1661 int64_t n;
1662 if (!bs->drv->bdrv_is_allocated) {
1663 if (sector_num >= bs->total_sectors) {
1664 *pnum = 0;
1665 return 0;
1667 n = bs->total_sectors - sector_num;
1668 *pnum = (n < nb_sectors) ? (n) : (nb_sectors);
1669 return 1;
1671 return bs->drv->bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);
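/*
 * Usage sketch (illustrative): walking an image with bdrv_is_allocated(),
 * letting 'pnum' skip over whole runs of sectors that share the same
 * allocation state instead of querying sector by sector.
 */
#if 0
static void example_walk_allocation(BlockDriverState *bs)
{
    int64_t sector = 0;
    int64_t total = bs->total_sectors;

    while (sector < total) {
        int64_t remaining = total - sector;
        int chunk = remaining > 65536 ? 65536 : (int)remaining;
        int pnum;

        if (bdrv_is_allocated(bs, sector, chunk, &pnum)) {
            /* [sector, sector + pnum) is backed by this image */
        } else {
            /* [sector, sector + pnum) comes from the backing file, if any */
        }
        if (pnum == 0) {
            break;
        }
        sector += pnum;
    }
}
#endif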
1674 void bdrv_mon_event(const BlockDriverState *bdrv,
1675 BlockMonEventAction action, int is_read)
1677 QObject *data;
1678 const char *action_str;
1680 switch (action) {
1681 case BDRV_ACTION_REPORT:
1682 action_str = "report";
1683 break;
1684 case BDRV_ACTION_IGNORE:
1685 action_str = "ignore";
1686 break;
1687 case BDRV_ACTION_STOP:
1688 action_str = "stop";
1689 break;
1690 default:
1691 abort();
1694 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1695 bdrv->device_name,
1696 action_str,
1697 is_read ? "read" : "write");
1698 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
1700 qobject_decref(data);
1703 static void bdrv_print_dict(QObject *obj, void *opaque)
1705 QDict *bs_dict;
1706 Monitor *mon = opaque;
1708 bs_dict = qobject_to_qdict(obj);
1710 monitor_printf(mon, "%s: removable=%d",
1711 qdict_get_str(bs_dict, "device"),
1712 qdict_get_bool(bs_dict, "removable"));
1714 if (qdict_get_bool(bs_dict, "removable")) {
1715 monitor_printf(mon, " locked=%d", qdict_get_bool(bs_dict, "locked"));
1718 if (qdict_haskey(bs_dict, "inserted")) {
1719 QDict *qdict = qobject_to_qdict(qdict_get(bs_dict, "inserted"));
1721 monitor_printf(mon, " file=");
1722 monitor_print_filename(mon, qdict_get_str(qdict, "file"));
1723 if (qdict_haskey(qdict, "backing_file")) {
1724 monitor_printf(mon, " backing_file=");
1725 monitor_print_filename(mon, qdict_get_str(qdict, "backing_file"));
1727 monitor_printf(mon, " ro=%d drv=%s encrypted=%d",
1728 qdict_get_bool(qdict, "ro"),
1729 qdict_get_str(qdict, "drv"),
1730 qdict_get_bool(qdict, "encrypted"));
1731 } else {
1732 monitor_printf(mon, " [not inserted]");
1735 monitor_printf(mon, "\n");
1738 void bdrv_info_print(Monitor *mon, const QObject *data)
1740 qlist_iter(qobject_to_qlist(data), bdrv_print_dict, mon);
1743 void bdrv_info(Monitor *mon, QObject **ret_data)
1745 QList *bs_list;
1746 BlockDriverState *bs;
1748 bs_list = qlist_new();
1750 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1751 QObject *bs_obj;
1753 bs_obj = qobject_from_jsonf("{ 'device': %s, 'type': 'unknown', "
1754 "'removable': %i, 'locked': %i }",
1755 bs->device_name, bs->removable,
1756 bs->locked);
1758 if (bs->drv) {
1759 QObject *obj;
1760 QDict *bs_dict = qobject_to_qdict(bs_obj);
1762 obj = qobject_from_jsonf("{ 'file': %s, 'ro': %i, 'drv': %s, "
1763 "'encrypted': %i }",
1764 bs->filename, bs->read_only,
1765 bs->drv->format_name,
1766 bdrv_is_encrypted(bs));
1767 if (bs->backing_file[0] != '\0') {
1768 QDict *qdict = qobject_to_qdict(obj);
1769 qdict_put(qdict, "backing_file",
1770 qstring_from_str(bs->backing_file));
1773 qdict_put_obj(bs_dict, "inserted", obj);
1775 qlist_append_obj(bs_list, bs_obj);
1778 *ret_data = QOBJECT(bs_list);
1781 static void bdrv_stats_iter(QObject *data, void *opaque)
1783 QDict *qdict;
1784 Monitor *mon = opaque;
1786 qdict = qobject_to_qdict(data);
1787 monitor_printf(mon, "%s:", qdict_get_str(qdict, "device"));
1789 qdict = qobject_to_qdict(qdict_get(qdict, "stats"));
1790 monitor_printf(mon, " rd_bytes=%" PRId64
1791 " wr_bytes=%" PRId64
1792 " rd_operations=%" PRId64
1793 " wr_operations=%" PRId64
1794 "\n",
1795 qdict_get_int(qdict, "rd_bytes"),
1796 qdict_get_int(qdict, "wr_bytes"),
1797 qdict_get_int(qdict, "rd_operations"),
1798 qdict_get_int(qdict, "wr_operations"));
1801 void bdrv_stats_print(Monitor *mon, const QObject *data)
1803 qlist_iter(qobject_to_qlist(data), bdrv_stats_iter, mon);
1806 static QObject* bdrv_info_stats_bs(BlockDriverState *bs)
1808 QObject *res;
1809 QDict *dict;
1811 res = qobject_from_jsonf("{ 'stats': {"
1812 "'rd_bytes': %" PRId64 ","
1813 "'wr_bytes': %" PRId64 ","
1814 "'rd_operations': %" PRId64 ","
1815 "'wr_operations': %" PRId64 ","
1816 "'wr_highest_offset': %" PRId64
1817 "} }",
1818 bs->rd_bytes, bs->wr_bytes,
1819 bs->rd_ops, bs->wr_ops,
1820 bs->wr_highest_sector *
1821 (uint64_t)BDRV_SECTOR_SIZE);
1822 dict = qobject_to_qdict(res);
1824 if (*bs->device_name) {
1825 qdict_put(dict, "device", qstring_from_str(bs->device_name));
1828 if (bs->file) {
1829 QObject *parent = bdrv_info_stats_bs(bs->file);
1830 qdict_put_obj(dict, "parent", parent);
1833 return res;
1836 void bdrv_info_stats(Monitor *mon, QObject **ret_data)
1838 QObject *obj;
1839 QList *devices;
1840 BlockDriverState *bs;
1842 devices = qlist_new();
1844 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1845 obj = bdrv_info_stats_bs(bs);
1846 qlist_append_obj(devices, obj);
1849 *ret_data = QOBJECT(devices);
1852 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
1854 if (bs->backing_hd && bs->backing_hd->encrypted)
1855 return bs->backing_file;
1856 else if (bs->encrypted)
1857 return bs->filename;
1858 else
1859 return NULL;
1862 void bdrv_get_backing_filename(BlockDriverState *bs,
1863 char *filename, int filename_size)
1865 if (!bs->backing_file) {
1866 pstrcpy(filename, filename_size, "");
1867 } else {
1868 pstrcpy(filename, filename_size, bs->backing_file);
1872 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1873 const uint8_t *buf, int nb_sectors)
1875 BlockDriver *drv = bs->drv;
1876 if (!drv)
1877 return -ENOMEDIUM;
1878 if (!drv->bdrv_write_compressed)
1879 return -ENOTSUP;
1880 if (bdrv_check_request(bs, sector_num, nb_sectors))
1881 return -EIO;
1883 if (bs->dirty_bitmap) {
1884 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
1887 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1890 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
1892 BlockDriver *drv = bs->drv;
1893 if (!drv)
1894 return -ENOMEDIUM;
1895 if (!drv->bdrv_get_info)
1896 return -ENOTSUP;
1897 memset(bdi, 0, sizeof(*bdi));
1898 return drv->bdrv_get_info(bs, bdi);
1901 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1902 int64_t pos, int size)
1904 BlockDriver *drv = bs->drv;
1905 if (!drv)
1906 return -ENOMEDIUM;
1907 if (drv->bdrv_save_vmstate)
1908 return drv->bdrv_save_vmstate(bs, buf, pos, size);
1909 if (bs->file)
1910 return bdrv_save_vmstate(bs->file, buf, pos, size);
1911 return -ENOTSUP;
1914 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1915 int64_t pos, int size)
1917 BlockDriver *drv = bs->drv;
1918 if (!drv)
1919 return -ENOMEDIUM;
1920 if (drv->bdrv_load_vmstate)
1921 return drv->bdrv_load_vmstate(bs, buf, pos, size);
1922 if (bs->file)
1923 return bdrv_load_vmstate(bs->file, buf, pos, size);
1924 return -ENOTSUP;
1927 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
1929 BlockDriver *drv = bs->drv;
1931 if (!drv || !drv->bdrv_debug_event) {
1932 return;
1935 return drv->bdrv_debug_event(bs, event);
1939 /**************************************************************/
1940 /* handling of snapshots */
1942 int bdrv_can_snapshot(BlockDriverState *bs)
1944 BlockDriver *drv = bs->drv;
1945 if (!drv || bdrv_is_removable(bs) || bdrv_is_read_only(bs)) {
1946 return 0;
1949 if (!drv->bdrv_snapshot_create) {
1950 if (bs->file != NULL) {
1951 return bdrv_can_snapshot(bs->file);
1953 return 0;
1956 return 1;
1959 int bdrv_is_snapshot(BlockDriverState *bs)
1961 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
1964 BlockDriverState *bdrv_snapshots(void)
1966 BlockDriverState *bs;
1968 if (bs_snapshots) {
1969 return bs_snapshots;
1972 bs = NULL;
1973 while ((bs = bdrv_next(bs))) {
1974 if (bdrv_can_snapshot(bs)) {
1975 bs_snapshots = bs;
1976 return bs;
1979 return NULL;
1982 int bdrv_snapshot_create(BlockDriverState *bs,
1983 QEMUSnapshotInfo *sn_info)
1985 BlockDriver *drv = bs->drv;
1986 if (!drv)
1987 return -ENOMEDIUM;
1988 if (drv->bdrv_snapshot_create)
1989 return drv->bdrv_snapshot_create(bs, sn_info);
1990 if (bs->file)
1991 return bdrv_snapshot_create(bs->file, sn_info);
1992 return -ENOTSUP;
1995 int bdrv_snapshot_goto(BlockDriverState *bs,
1996 const char *snapshot_id)
1998 BlockDriver *drv = bs->drv;
1999 int ret, open_ret;
2001 if (!drv)
2002 return -ENOMEDIUM;
2003 if (drv->bdrv_snapshot_goto)
2004 return drv->bdrv_snapshot_goto(bs, snapshot_id);
2006 if (bs->file) {
2007 drv->bdrv_close(bs);
2008 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
2009 open_ret = drv->bdrv_open(bs, bs->open_flags);
2010 if (open_ret < 0) {
2011 bdrv_delete(bs->file);
2012 bs->drv = NULL;
2013 return open_ret;
2015 return ret;
2018 return -ENOTSUP;
2021 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2023 BlockDriver *drv = bs->drv;
2024 if (!drv)
2025 return -ENOMEDIUM;
2026 if (drv->bdrv_snapshot_delete)
2027 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2028 if (bs->file)
2029 return bdrv_snapshot_delete(bs->file, snapshot_id);
2030 return -ENOTSUP;
2033 int bdrv_snapshot_list(BlockDriverState *bs,
2034 QEMUSnapshotInfo **psn_info)
2036 BlockDriver *drv = bs->drv;
2037 if (!drv)
2038 return -ENOMEDIUM;
2039 if (drv->bdrv_snapshot_list)
2040 return drv->bdrv_snapshot_list(bs, psn_info);
2041 if (bs->file)
2042 return bdrv_snapshot_list(bs->file, psn_info);
2043 return -ENOTSUP;
2046 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2047 const char *snapshot_name)
2049 BlockDriver *drv = bs->drv;
2050 if (!drv) {
2051 return -ENOMEDIUM;
2053 if (!bs->read_only) {
2054 return -EINVAL;
2056 if (drv->bdrv_snapshot_load_tmp) {
2057 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
2059 return -ENOTSUP;
2062 #define NB_SUFFIXES 4
2064 char *get_human_readable_size(char *buf, int buf_size, int64_t size)
2066 static const char suffixes[NB_SUFFIXES] = "KMGT";
2067 int64_t base;
2068 int i;
2070 if (size <= 999) {
2071 snprintf(buf, buf_size, "%" PRId64, size);
2072 } else {
2073 base = 1024;
2074 for(i = 0; i < NB_SUFFIXES; i++) {
2075 if (size < (10 * base)) {
2076 snprintf(buf, buf_size, "%0.1f%c",
2077 (double)size / base,
2078 suffixes[i]);
2079 break;
2080 } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
2081 snprintf(buf, buf_size, "%" PRId64 "%c",
2082 ((size + (base >> 1)) / base),
2083 suffixes[i]);
2084 break;
2086 base = base * 1024;
2089 return buf;
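/*
 * Example outputs (illustrative):
 *   999          -> "999"    (values up to 999 are printed verbatim)
 *   1536         -> "1.5K"   (one decimal place while below 10 * base)
 *   1048576      -> "1.0M"
 *   5368709120   -> "5.0G"
 */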
2092 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
2094 char buf1[128], date_buf[128], clock_buf[128];
2095 #ifdef _WIN32
2096 struct tm *ptm;
2097 #else
2098 struct tm tm;
2099 #endif
2100 time_t ti;
2101 int64_t secs;
2103 if (!sn) {
2104 snprintf(buf, buf_size,
2105 "%-10s%-20s%7s%20s%15s",
2106 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
2107 } else {
2108 ti = sn->date_sec;
2109 #ifdef _WIN32
2110 ptm = localtime(&ti);
2111 strftime(date_buf, sizeof(date_buf),
2112 "%Y-%m-%d %H:%M:%S", ptm);
2113 #else
2114 localtime_r(&ti, &tm);
2115 strftime(date_buf, sizeof(date_buf),
2116 "%Y-%m-%d %H:%M:%S", &tm);
2117 #endif
2118 secs = sn->vm_clock_nsec / 1000000000;
2119 snprintf(clock_buf, sizeof(clock_buf),
2120 "%02d:%02d:%02d.%03d",
2121 (int)(secs / 3600),
2122 (int)((secs / 60) % 60),
2123 (int)(secs % 60),
2124 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2125 snprintf(buf, buf_size,
2126 "%-10s%-20s%7s%20s%15s",
2127 sn->id_str, sn->name,
2128 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2129 date_buf,
2130 clock_buf);
2132 return buf;
2136 /**************************************************************/
2137 /* async I/Os */
2139 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
2140 QEMUIOVector *qiov, int nb_sectors,
2141 BlockDriverCompletionFunc *cb, void *opaque)
2143 BlockDriver *drv = bs->drv;
2144 BlockDriverAIOCB *ret;
2146 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2148 if (!drv)
2149 return NULL;
2150 if (bdrv_check_request(bs, sector_num, nb_sectors))
2151 return NULL;
2153 ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
2154 cb, opaque);
2156 if (ret) {
2157 /* Update stats even though technically transfer has not happened. */
2158 bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2159 bs->rd_ops ++;
2162 return ret;
2165 typedef struct BlockCompleteData {
2166 BlockDriverCompletionFunc *cb;
2167 void *opaque;
2168 BlockDriverState *bs;
2169 int64_t sector_num;
2170 int nb_sectors;
2171 } BlockCompleteData;
2173 static void block_complete_cb(void *opaque, int ret)
2175 BlockCompleteData *b = opaque;
2177 if (b->bs->dirty_bitmap) {
2178 set_dirty_bitmap(b->bs, b->sector_num, b->nb_sectors, 1);
2180 b->cb(b->opaque, ret);
2181 qemu_free(b);
2184 static BlockCompleteData *blk_dirty_cb_alloc(BlockDriverState *bs,
2185 int64_t sector_num,
2186 int nb_sectors,
2187 BlockDriverCompletionFunc *cb,
2188 void *opaque)
2190 BlockCompleteData *blkdata = qemu_mallocz(sizeof(BlockCompleteData));
2192 blkdata->bs = bs;
2193 blkdata->cb = cb;
2194 blkdata->opaque = opaque;
2195 blkdata->sector_num = sector_num;
2196 blkdata->nb_sectors = nb_sectors;
2198 return blkdata;
2201 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2202 QEMUIOVector *qiov, int nb_sectors,
2203 BlockDriverCompletionFunc *cb, void *opaque)
2205 BlockDriver *drv = bs->drv;
2206 BlockDriverAIOCB *ret;
2207 BlockCompleteData *blk_cb_data;
2209 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2211 if (!drv)
2212 return NULL;
2213 if (bs->read_only)
2214 return NULL;
2215 if (bdrv_check_request(bs, sector_num, nb_sectors))
2216 return NULL;
2218 if (bs->dirty_bitmap) {
2219 blk_cb_data = blk_dirty_cb_alloc(bs, sector_num, nb_sectors, cb,
2220 opaque);
2221 cb = &block_complete_cb;
2222 opaque = blk_cb_data;
2225 ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
2226 cb, opaque);
2228 if (ret) {
2229 /* Update stats even though technically transfer has not happened. */
2230 bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2231 bs->wr_ops ++;
2232 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
2233 bs->wr_highest_sector = sector_num + nb_sectors - 1;
2237 return ret;
2241 typedef struct MultiwriteCB {
2242 int error;
2243 int num_requests;
2244 int num_callbacks;
2245 struct {
2246 BlockDriverCompletionFunc *cb;
2247 void *opaque;
2248 QEMUIOVector *free_qiov;
2249 void *free_buf;
2250 } callbacks[];
2251 } MultiwriteCB;
2253 static void multiwrite_user_cb(MultiwriteCB *mcb)
2255 int i;
2257 for (i = 0; i < mcb->num_callbacks; i++) {
2258 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
2259 if (mcb->callbacks[i].free_qiov) {
2260 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2262 qemu_free(mcb->callbacks[i].free_qiov);
2263 qemu_vfree(mcb->callbacks[i].free_buf);
2267 static void multiwrite_cb(void *opaque, int ret)
2269 MultiwriteCB *mcb = opaque;
2271 trace_multiwrite_cb(mcb, ret);
2273 if (ret < 0 && !mcb->error) {
2274 mcb->error = ret;
2277 mcb->num_requests--;
2278 if (mcb->num_requests == 0) {
2279 multiwrite_user_cb(mcb);
2280 qemu_free(mcb);
2284 static int multiwrite_req_compare(const void *a, const void *b)
2286 const BlockRequest *req1 = a, *req2 = b;
2289 * Note that we can't simply subtract req2->sector from req1->sector
2290 * here as that could overflow the return value.
2292 if (req1->sector > req2->sector) {
2293 return 1;
2294 } else if (req1->sector < req2->sector) {
2295 return -1;
2296 } else {
2297 return 0;
2302 * Takes a bunch of requests and tries to merge them. Returns the number of
2303 * requests that remain after merging.
2305 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2306 int num_reqs, MultiwriteCB *mcb)
2308 int i, outidx;
2310 // Sort requests by start sector
2311 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2313 // Check if adjacent requests touch the same clusters. If so, combine them,
2314 // filling up gaps with zero sectors.
2315 outidx = 0;
2316 for (i = 1; i < num_reqs; i++) {
2317 int merge = 0;
2318 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2320 // This handles the cases that are valid for all block drivers, namely
2321 // exactly sequential writes and overlapping writes.
2322 if (reqs[i].sector <= oldreq_last) {
2323 merge = 1;
2326 // The block driver may decide that it makes sense to combine requests
2327 // even if there is a gap of some sectors between them. In this case,
2328 // the gap is filled with zeros (therefore only applicable for yet
2329         // unused space in formats like qcow2).
2330 if (!merge && bs->drv->bdrv_merge_requests) {
2331 merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
2334 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2335 merge = 0;
2338 if (merge) {
2339 size_t size;
2340 QEMUIOVector *qiov = qemu_mallocz(sizeof(*qiov));
2341 qemu_iovec_init(qiov,
2342 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2344 // Add the first request to the merged one. If the requests are
2345 // overlapping, drop the last sectors of the first request.
2346 size = (reqs[i].sector - reqs[outidx].sector) << 9;
2347 qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
2349 // We might need to add some zeros between the two requests
2350 if (reqs[i].sector > oldreq_last) {
2351 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
2352 uint8_t *buf = qemu_blockalign(bs, zero_bytes);
2353 memset(buf, 0, zero_bytes);
2354 qemu_iovec_add(qiov, buf, zero_bytes);
2355 mcb->callbacks[i].free_buf = buf;
2358 // Add the second request
2359 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
2361 reqs[outidx].nb_sectors = qiov->size >> 9;
2362 reqs[outidx].qiov = qiov;
2364 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2365 } else {
2366 outidx++;
2367 reqs[outidx].sector = reqs[i].sector;
2368 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2369 reqs[outidx].qiov = reqs[i].qiov;
2373 return outidx + 1;
2377 * Submit multiple AIO write requests at once.
2379 * On success, the function returns 0 and all requests in the reqs array have
2380  * been submitted. On error, this function returns -1, and each individual
2381  * request may or may not have been submitted. In particular, this means that
2382  * the callback will be called for some of the requests and not for others. The
2383  * caller must check the error field of each BlockRequest to know which
2384  * callbacks to wait for (if error != 0, no callback will be called).
2386 * The implementation may modify the contents of the reqs array, e.g. to merge
2387 * requests. However, the fields opaque and error are left unmodified as they
2388 * are used to signal failure for a single request to the caller.
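 *
 * A minimal usage sketch (illustrative only; my_write_cb, dev, qiov0 and
 * qiov1 are hypothetical caller-side names, not defined in this file):
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_cb, .opaque = dev },
 *         { .sector = 16, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = dev },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // requests with reqs[i].error != 0 will never get a callback
 *     }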
2390 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
2392 BlockDriverAIOCB *acb;
2393 MultiwriteCB *mcb;
2394 int i;
2396 /* don't submit writes if we don't have a medium */
2397 if (bs->drv == NULL) {
2398 for (i = 0; i < num_reqs; i++) {
2399 reqs[i].error = -ENOMEDIUM;
2401 return -1;
2404 if (num_reqs == 0) {
2405 return 0;
2408 // Create MultiwriteCB structure
2409 mcb = qemu_mallocz(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
2410 mcb->num_requests = 0;
2411 mcb->num_callbacks = num_reqs;
2413 for (i = 0; i < num_reqs; i++) {
2414 mcb->callbacks[i].cb = reqs[i].cb;
2415 mcb->callbacks[i].opaque = reqs[i].opaque;
2418     // Check for mergeable requests
2419 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
2421 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
2424 * Run the aio requests. As soon as one request can't be submitted
2425 * successfully, fail all requests that are not yet submitted (we must
2426 * return failure for all requests anyway)
2428 * num_requests cannot be set to the right value immediately: If
2429 * bdrv_aio_writev fails for some request, num_requests would be too high
2430 * and therefore multiwrite_cb() would never recognize the multiwrite
2431 * request as completed. We also cannot use the loop variable i to set it
2432 * when the first request fails because the callback may already have been
2433 * called for previously submitted requests. Thus, num_requests must be
2434 * incremented for each request that is submitted.
2436 * The problem that callbacks may be called early also means that we need
2437 * to take care that num_requests doesn't become 0 before all requests are
2438 * submitted - multiwrite_cb() would consider the multiwrite request
2439 * completed. A dummy request that is "completed" by a manual call to
2440 * multiwrite_cb() takes care of this.
2442 mcb->num_requests = 1;
2444 // Run the aio requests
2445 for (i = 0; i < num_reqs; i++) {
2446 mcb->num_requests++;
2447 acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
2448 reqs[i].nb_sectors, multiwrite_cb, mcb);
2450 if (acb == NULL) {
2451 // We can only fail the whole thing if no request has been
2452 // submitted yet. Otherwise we'll wait for the submitted AIOs to
2453 // complete and report the error in the callback.
2454 if (i == 0) {
2455 trace_bdrv_aio_multiwrite_earlyfail(mcb);
2456 goto fail;
2457 } else {
2458 trace_bdrv_aio_multiwrite_latefail(mcb, i);
2459 multiwrite_cb(mcb, -EIO);
2460 break;
2465 /* Complete the dummy request */
2466 multiwrite_cb(mcb, 0);
2468 return 0;
2470 fail:
2471 for (i = 0; i < mcb->num_callbacks; i++) {
2472 reqs[i].error = -EIO;
2474 qemu_free(mcb);
2475 return -1;
2478 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2479 BlockDriverCompletionFunc *cb, void *opaque)
2481 BlockDriver *drv = bs->drv;
2483 trace_bdrv_aio_flush(bs, opaque);
2485 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2486 return bdrv_aio_noop_em(bs, cb, opaque);
2489 if (!drv)
2490 return NULL;
2491 return drv->bdrv_aio_flush(bs, cb, opaque);
2494 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
2496 acb->pool->cancel(acb);
2500 /**************************************************************/
2501 /* async block device emulation */
2503 typedef struct BlockDriverAIOCBSync {
2504 BlockDriverAIOCB common;
2505 QEMUBH *bh;
2506 int ret;
2507 /* vector translation state */
2508 QEMUIOVector *qiov;
2509 uint8_t *bounce;
2510 int is_write;
2511 } BlockDriverAIOCBSync;
2513 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
2515 BlockDriverAIOCBSync *acb =
2516 container_of(blockacb, BlockDriverAIOCBSync, common);
2517 qemu_bh_delete(acb->bh);
2518 acb->bh = NULL;
2519 qemu_aio_release(acb);
2522 static AIOPool bdrv_em_aio_pool = {
2523 .aiocb_size = sizeof(BlockDriverAIOCBSync),
2524 .cancel = bdrv_aio_cancel_em,
2527 static void bdrv_aio_bh_cb(void *opaque)
2529 BlockDriverAIOCBSync *acb = opaque;
2531 if (!acb->is_write)
2532 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
2533 qemu_vfree(acb->bounce);
2534 acb->common.cb(acb->common.opaque, acb->ret);
2535 qemu_bh_delete(acb->bh);
2536 acb->bh = NULL;
2537 qemu_aio_release(acb);
2540 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2541 int64_t sector_num,
2542 QEMUIOVector *qiov,
2543 int nb_sectors,
2544 BlockDriverCompletionFunc *cb,
2545 void *opaque,
2546 int is_write)
2549 BlockDriverAIOCBSync *acb;
2551 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2552 acb->is_write = is_write;
2553 acb->qiov = qiov;
2554 acb->bounce = qemu_blockalign(bs, qiov->size);
2556 if (!acb->bh)
2557 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2559 if (is_write) {
2560 qemu_iovec_to_buffer(acb->qiov, acb->bounce);
2561 acb->ret = bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2562 } else {
2563 acb->ret = bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2566 qemu_bh_schedule(acb->bh);
2568 return &acb->common;
2571 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2572 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2573 BlockDriverCompletionFunc *cb, void *opaque)
2575 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2578 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2579 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2580 BlockDriverCompletionFunc *cb, void *opaque)
2582 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2585 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
2586 BlockDriverCompletionFunc *cb, void *opaque)
2588 BlockDriverAIOCBSync *acb;
2590 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2591     acb->is_write = 1; /* don't bounce in the completion handler */
2592 acb->qiov = NULL;
2593 acb->bounce = NULL;
2594 acb->ret = 0;
2596 if (!acb->bh)
2597 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2599 bdrv_flush(bs);
2600 qemu_bh_schedule(acb->bh);
2601 return &acb->common;
2604 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
2605 BlockDriverCompletionFunc *cb, void *opaque)
2607 BlockDriverAIOCBSync *acb;
2609 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2610 acb->is_write = 1; /* don't bounce in the completion handler */
2611 acb->qiov = NULL;
2612 acb->bounce = NULL;
2613 acb->ret = 0;
2615 if (!acb->bh) {
2616 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2619 qemu_bh_schedule(acb->bh);
2620 return &acb->common;
2623 /**************************************************************/
2624 /* sync block device emulation */
2626 static void bdrv_rw_em_cb(void *opaque, int ret)
2628 *(int *)opaque = ret;
2631 #define NOT_DONE 0x7fffffff
2633 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
2634 uint8_t *buf, int nb_sectors)
2636 int async_ret;
2637 BlockDriverAIOCB *acb;
2638 struct iovec iov;
2639 QEMUIOVector qiov;
2641 async_context_push();
2643 async_ret = NOT_DONE;
2644 iov.iov_base = (void *)buf;
2645 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2646 qemu_iovec_init_external(&qiov, &iov, 1);
2647 acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
2648 bdrv_rw_em_cb, &async_ret);
2649 if (acb == NULL) {
2650 async_ret = -1;
2651 goto fail;
2654 while (async_ret == NOT_DONE) {
2655 qemu_aio_wait();
2659 fail:
2660 async_context_pop();
2661 return async_ret;
2664 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
2665 const uint8_t *buf, int nb_sectors)
2667 int async_ret;
2668 BlockDriverAIOCB *acb;
2669 struct iovec iov;
2670 QEMUIOVector qiov;
2672 async_context_push();
2674 async_ret = NOT_DONE;
2675 iov.iov_base = (void *)buf;
2676 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2677 qemu_iovec_init_external(&qiov, &iov, 1);
2678 acb = bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,
2679 bdrv_rw_em_cb, &async_ret);
2680 if (acb == NULL) {
2681 async_ret = -1;
2682 goto fail;
2684 while (async_ret == NOT_DONE) {
2685 qemu_aio_wait();
2688 fail:
2689 async_context_pop();
2690 return async_ret;
2693 void bdrv_init(void)
2695 module_call_init(MODULE_INIT_BLOCK);
2698 void bdrv_init_with_whitelist(void)
2700 use_bdrv_whitelist = 1;
2701 bdrv_init();
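/*
 * AIOCB allocation: every AIOPool keeps a singly linked free list of
 * released AIOCBs (pool->free_aiocb, chained through acb->next).
 * qemu_aio_get() reuses an entry from that list when one is available and
 * only falls back to qemu_mallocz(pool->aiocb_size) otherwise;
 * qemu_aio_release() pushes the AIOCB back onto its pool's free list
 * instead of freeing the memory.
 */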
2704 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
2705 BlockDriverCompletionFunc *cb, void *opaque)
2707 BlockDriverAIOCB *acb;
2709 if (pool->free_aiocb) {
2710 acb = pool->free_aiocb;
2711 pool->free_aiocb = acb->next;
2712 } else {
2713 acb = qemu_mallocz(pool->aiocb_size);
2714 acb->pool = pool;
2716 acb->bs = bs;
2717 acb->cb = cb;
2718 acb->opaque = opaque;
2719 return acb;
2722 void qemu_aio_release(void *p)
2724 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
2725 AIOPool *pool = acb->pool;
2726 acb->next = pool->free_aiocb;
2727 pool->free_aiocb = acb;
2730 /**************************************************************/
2731 /* removable device support */
2734 * Return TRUE if the media is present
2736 int bdrv_is_inserted(BlockDriverState *bs)
2738 BlockDriver *drv = bs->drv;
2739 int ret;
2740 if (!drv)
2741 return 0;
2742 if (!drv->bdrv_is_inserted)
2743 return !bs->tray_open;
2744 ret = drv->bdrv_is_inserted(bs);
2745 return ret;
2749 * Return TRUE if the media changed since the last call to this
2750 * function. It is currently only used for floppy disks
2752 int bdrv_media_changed(BlockDriverState *bs)
2754 BlockDriver *drv = bs->drv;
2755 int ret;
2757 if (!drv || !drv->bdrv_media_changed)
2758 ret = -ENOTSUP;
2759 else
2760 ret = drv->bdrv_media_changed(bs);
2761 if (ret == -ENOTSUP)
2762 ret = bs->media_changed;
2763 bs->media_changed = 0;
2764 return ret;
2768 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
2770 int bdrv_eject(BlockDriverState *bs, int eject_flag)
2772 BlockDriver *drv = bs->drv;
2774 if (eject_flag && bs->locked) {
2775 return -EBUSY;
2778 if (drv && drv->bdrv_eject) {
2779 drv->bdrv_eject(bs, eject_flag);
2781 bs->tray_open = eject_flag;
2782 return 0;
2785 int bdrv_is_locked(BlockDriverState *bs)
2787 return bs->locked;
2791 * Lock or unlock the media (if it is locked, the user won't be able
2792 * to eject it manually).
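 *
 * Illustrative interaction with bdrv_eject(): after bdrv_set_locked(bs, 1),
 * a subsequent bdrv_eject(bs, 1) returns -EBUSY and the tray stays closed;
 * after bdrv_set_locked(bs, 0) the same call invokes the driver's eject
 * callback (if any) and marks the tray as open.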
2794 void bdrv_set_locked(BlockDriverState *bs, int locked)
2796 BlockDriver *drv = bs->drv;
2798 trace_bdrv_set_locked(bs, locked);
2800 bs->locked = locked;
2801 if (drv && drv->bdrv_set_locked) {
2802 drv->bdrv_set_locked(bs, locked);
2806 /* needed for generic scsi interface */
2808 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2810 BlockDriver *drv = bs->drv;
2812 if (drv && drv->bdrv_ioctl)
2813 return drv->bdrv_ioctl(bs, req, buf);
2814 return -ENOTSUP;
2817 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2818 unsigned long int req, void *buf,
2819 BlockDriverCompletionFunc *cb, void *opaque)
2821 BlockDriver *drv = bs->drv;
2823 if (drv && drv->bdrv_aio_ioctl)
2824 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
2825 return NULL;
2830 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2832 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
2835 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
2837 int64_t bitmap_size;
2839 bs->dirty_count = 0;
2840 if (enable) {
2841 if (!bs->dirty_bitmap) {
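            /*
             * One bit in the bitmap covers a chunk of
             * BDRV_SECTORS_PER_DIRTY_CHUNK sectors, and 8 chunks are packed
             * per byte, so the allocation size is
             *   bytes = ceil(nb_sectors / (BDRV_SECTORS_PER_DIRTY_CHUNK * 8))
             * computed below with the usual add-divisor-minus-one rounding.
             */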
2842 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
2843 BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
2844 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
2846 bs->dirty_bitmap = qemu_mallocz(bitmap_size);
2848 } else {
2849 if (bs->dirty_bitmap) {
2850 qemu_free(bs->dirty_bitmap);
2851 bs->dirty_bitmap = NULL;
2856 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
2858 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
2860 if (bs->dirty_bitmap &&
2861 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
2862 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
2863 (1UL << (chunk % (sizeof(unsigned long) * 8))));
2864 } else {
2865 return 0;
2869 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
2870 int nr_sectors)
2872 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
2875 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
2877 return bs->dirty_count;
2880 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
2882 assert(bs->in_use != in_use);
2883 bs->in_use = in_use;
2886 int bdrv_in_use(BlockDriverState *bs)
2888 return bs->in_use;
2891 int bdrv_img_create(const char *filename, const char *fmt,
2892 const char *base_filename, const char *base_fmt,
2893 char *options, uint64_t img_size, int flags)
2895 QEMUOptionParameter *param = NULL, *create_options = NULL;
2896 QEMUOptionParameter *backing_fmt, *backing_file, *size;
2897 BlockDriverState *bs = NULL;
2898 BlockDriver *drv, *proto_drv;
2899 BlockDriver *backing_drv = NULL;
2900 int ret = 0;
2902 /* Find driver and parse its options */
2903 drv = bdrv_find_format(fmt);
2904 if (!drv) {
2905 error_report("Unknown file format '%s'", fmt);
2906 ret = -EINVAL;
2907 goto out;
2910 proto_drv = bdrv_find_protocol(filename);
2911 if (!proto_drv) {
2912 error_report("Unknown protocol '%s'", filename);
2913 ret = -EINVAL;
2914 goto out;
2917 create_options = append_option_parameters(create_options,
2918 drv->create_options);
2919 create_options = append_option_parameters(create_options,
2920 proto_drv->create_options);
2922 /* Create parameter list with default values */
2923 param = parse_option_parameters("", create_options, param);
2925 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
2927 /* Parse -o options */
2928 if (options) {
2929 param = parse_option_parameters(options, create_options, param);
2930 if (param == NULL) {
2931 error_report("Invalid options for file format '%s'.", fmt);
2932 ret = -EINVAL;
2933 goto out;
2937 if (base_filename) {
2938 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
2939 base_filename)) {
2940 error_report("Backing file not supported for file format '%s'",
2941 fmt);
2942 ret = -EINVAL;
2943 goto out;
2947 if (base_fmt) {
2948 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
2949 error_report("Backing file format not supported for file "
2950 "format '%s'", fmt);
2951 ret = -EINVAL;
2952 goto out;
2956 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
2957 if (backing_file && backing_file->value.s) {
2958 if (!strcmp(filename, backing_file->value.s)) {
2959 error_report("Error: Trying to create an image with the "
2960 "same filename as the backing file");
2961 ret = -EINVAL;
2962 goto out;
2966 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
2967 if (backing_fmt && backing_fmt->value.s) {
2968 backing_drv = bdrv_find_format(backing_fmt->value.s);
2969 if (!backing_drv) {
2970 error_report("Unknown backing file format '%s'",
2971 backing_fmt->value.s);
2972 ret = -EINVAL;
2973 goto out;
2977 // The size for the image must always be specified, with one exception:
2978 // If we are using a backing file, we can obtain the size from there
2979 size = get_option_parameter(param, BLOCK_OPT_SIZE);
2980 if (size && size->value.n == -1) {
2981 if (backing_file && backing_file->value.s) {
2982 uint64_t size;
2983 char buf[32];
2985 bs = bdrv_new("");
2987 ret = bdrv_open(bs, backing_file->value.s, flags, backing_drv);
2988 if (ret < 0) {
2989 error_report("Could not open '%s'", backing_file->value.s);
2990 goto out;
2992 bdrv_get_geometry(bs, &size);
2993 size *= 512;
2995 snprintf(buf, sizeof(buf), "%" PRId64, size);
2996 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
2997 } else {
2998 error_report("Image creation needs a size parameter");
2999 ret = -EINVAL;
3000 goto out;
3004 printf("Formatting '%s', fmt=%s ", filename, fmt);
3005 print_option_parameters(param);
3006 puts("");
3008 ret = bdrv_create(drv, filename, param);
3010 if (ret < 0) {
3011 if (ret == -ENOTSUP) {
3012 error_report("Formatting or formatting option not supported for "
3013 "file format '%s'", fmt);
3014 } else if (ret == -EFBIG) {
3015 error_report("The image size is too large for file format '%s'",
3016 fmt);
3017 } else {
3018 error_report("%s: error while creating %s: %s", filename, fmt,
3019 strerror(-ret));
3023 out:
3024 free_option_parameters(create_options);
3025 free_option_parameters(param);
3027 if (bs) {
3028 bdrv_delete(bs);
3031 return ret;