/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
24 #include "qemu/osdep.h"
25 #include "block/block_int.h"
26 #include "sysemu/block-backend.h"
27 #include "qemu/module.h"
29 #include "block/qcow2.h"
30 #include "qemu/error-report.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/qbool.h"
33 #include "qapi/util.h"
34 #include "qapi/qmp/types.h"
35 #include "qapi-event.h"
37 #include "qemu/option_int.h"
38 #include "qemu/cutils.h"
39 #include "qemu/bswap.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/

typedef struct {
    uint32_t magic;
    uint32_t len;
} QEMU_PACKED QCowExtension;

#define  QCOW2_EXT_MAGIC_END 0
#define  QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define  QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table,
                                 Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
                             "pread fail from offset %" PRIu64, offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (offset > end_offset || ext.len > end_offset - offset) {
            error_setg(errp, "Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
                           " too large (>=%zu)", ext.len,
                           sizeof(bs->backing_format));
                return 2;
            }
            ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
                                 "Could not read format name");
                return 3;
            }
            bs->backing_format[ext.len] = '\0';
            s->image_backing_format = g_strdup(bs->backing_format);
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
                                     "Could not read table");
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: unknown extension: "
                                     "Could not read data");
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
                                       uint64_t mask)
{
    char *features = g_strdup("");
    char *old;

    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1ULL << table->bit)) {
                old = features;
                features = g_strdup_printf("%s%s%.46s", old, *old ? ", " : "",
                                           table->name);
                g_free(old);
                mask &= ~(1ULL << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        old = features;
        features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64,
                                   old, *old ? ", " : "", mask);
        g_free(old);
    }

    error_setg(errp, "Unsupported qcow2 feature(s): %s", features);
    g_free(features);
}

/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully.  Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t val;
    int ret;

    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
                      &val, sizeof(val));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}

/*
 * Clears the dirty bit and flushes before if necessary.  Only call this
 * function when there are no pending requests, it does not guard against
 * concurrent requests dirtying the image.
 */
static int qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret;

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;

        ret = bdrv_flush(bs);
        if (ret < 0) {
            return ret;
        }

        return qcow2_update_header(bs);
    }
    return 0;
}

/*
 * Marks the image as corrupt.
 */
int qcow2_mark_corrupt(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
    return qcow2_update_header(bs);
}

/*
 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
 * before if necessary.
 */
int qcow2_mark_consistent(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        int ret = bdrv_flush(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
        return qcow2_update_header(bs);
    }
    return 0;
}

static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
                       BdrvCheckMode fix)
{
    int ret = qcow2_check_refcounts(bs, result, fix);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
        return qcow2_mark_consistent(bs);
    }
    return ret;
}

static int validate_table_offset(BlockDriverState *bs, uint64_t offset,
                                 uint64_t entries, size_t entry_len)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t size;

    /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
     * because values will be passed to qemu functions taking int64_t. */
    if (entries > INT64_MAX / entry_len) {
        return -EINVAL;
    }

    size = entries * entry_len;

    if (INT64_MAX - size < offset) {
        return -EINVAL;
    }

    /* Tables must be cluster aligned */
    if (offset & (s->cluster_size - 1)) {
        return -EINVAL;
    }

    return 0;
}
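
/* Worked example: for the refcount table of an image with 64 KiB clusters,
 * entry_len is sizeof(uint64_t) == 8, so the first check rejects any entry
 * count above INT64_MAX / 8 before the multiplication "entries * entry_len"
 * could overflow; the second check rejects offset + size combinations that
 * would exceed INT64_MAX, and the last one enforces cluster alignment. */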

static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
    .desc = {
        {
            .name = QCOW2_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
        },
        {
            .name = QCOW2_OPT_DISCARD_REQUEST,
            .type = QEMU_OPT_BOOL,
            .help = "Pass guest discard requests to the layer below",
        },
        {
            .name = QCOW2_OPT_DISCARD_SNAPSHOT,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when snapshot related space "
                    "is freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_OTHER,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when other clusters are freed",
        },
        {
            .name = QCOW2_OPT_OVERLAP,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_TEMPLATE,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the main qcow2 header",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the active L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an active L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the refcount table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into a refcount block",
        },
        {
            .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the snapshot table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L2 table",
        },
        {
            .name = QCOW2_OPT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum combined metadata (L2 tables and refcount blocks) "
                    "cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum L2 table cache size",
        },
        {
            .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum refcount block cache size",
        },
        {
            .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
            .type = QEMU_OPT_NUMBER,
            .help = "Clean unused cache entries after this time (in seconds)",
        },
        { /* end of list */ }
    },
};

static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = QCOW2_OPT_OVERLAP_MAIN_HEADER,
    [QCOW2_OL_ACTIVE_L1_BITNR]      = QCOW2_OPT_OVERLAP_ACTIVE_L1,
    [QCOW2_OL_ACTIVE_L2_BITNR]      = QCOW2_OPT_OVERLAP_ACTIVE_L2,
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    [QCOW2_OL_INACTIVE_L1_BITNR]    = QCOW2_OPT_OVERLAP_INACTIVE_L1,
    [QCOW2_OL_INACTIVE_L2_BITNR]    = QCOW2_OPT_OVERLAP_INACTIVE_L2,
};

static void cache_clean_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    qcow2_cache_clean_unused(bs, s->l2_table_cache);
    qcow2_cache_clean_unused(bs, s->refcount_block_cache);
    timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
              (int64_t) s->cache_clean_interval * 1000);
}

static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_interval > 0) {
        s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL,
                                             SCALE_MS, cache_clean_timer_cb,
                                             bs);
        timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  (int64_t) s->cache_clean_interval * 1000);
    }
}

static void cache_clean_timer_del(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_timer) {
        timer_del(s->cache_clean_timer);
        timer_free(s->cache_clean_timer);
        s->cache_clean_timer = NULL;
    }
}

static void qcow2_detach_aio_context(BlockDriverState *bs)
{
    cache_clean_timer_del(bs);
}

static void qcow2_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    cache_clean_timer_init(bs, new_context);
}

static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
                             uint64_t *l2_cache_size,
                             uint64_t *refcount_cache_size, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t combined_cache_size;
    bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;

    combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
    l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
    refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);

    combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
    *l2_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 0);
    *refcount_cache_size = qemu_opt_get_size(opts,
                                             QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);

    if (combined_cache_size_set) {
        if (l2_cache_size_set && refcount_cache_size_set) {
            error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
                       " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
                       "at the same time");
            return;
        } else if (*l2_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        } else if (*refcount_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        }

        if (l2_cache_size_set) {
            *refcount_cache_size = combined_cache_size - *l2_cache_size;
        } else if (refcount_cache_size_set) {
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        } else {
            *refcount_cache_size = combined_cache_size
                                 / (DEFAULT_L2_REFCOUNT_SIZE_RATIO + 1);
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        }
    } else {
        if (!l2_cache_size_set && !refcount_cache_size_set) {
            *l2_cache_size = MAX(DEFAULT_L2_CACHE_BYTE_SIZE,
                                 (uint64_t)DEFAULT_L2_CACHE_CLUSTERS
                                 * s->cluster_size);
            *refcount_cache_size = *l2_cache_size
                                 / DEFAULT_L2_REFCOUNT_SIZE_RATIO;
        } else if (!l2_cache_size_set) {
            *l2_cache_size = *refcount_cache_size
                           * DEFAULT_L2_REFCOUNT_SIZE_RATIO;
        } else if (!refcount_cache_size_set) {
            *refcount_cache_size = *l2_cache_size
                                 / DEFAULT_L2_REFCOUNT_SIZE_RATIO;
        }
    }
}
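
/* Example of the resulting split: if only cache-size=C is given, the refcount
 * cache receives C / (DEFAULT_L2_REFCOUNT_SIZE_RATIO + 1) and the L2 cache the
 * remainder, so the two caches keep the configured ratio; if neither option is
 * given, the L2 cache defaults to the larger of DEFAULT_L2_CACHE_BYTE_SIZE and
 * DEFAULT_L2_CACHE_CLUSTERS whole clusters, and the refcount cache is derived
 * from it by dividing by the same ratio. */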

typedef struct Qcow2ReopenState {
    Qcow2Cache *l2_table_cache;
    Qcow2Cache *refcount_block_cache;
    bool use_lazy_refcounts;
    int overlap_check;
    bool discard_passthrough[QCOW2_DISCARD_MAX];
    uint64_t cache_clean_interval;
} Qcow2ReopenState;

static int qcow2_update_options_prepare(BlockDriverState *bs,
                                        Qcow2ReopenState *r,
                                        QDict *options, int flags,
                                        Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QemuOpts *opts = NULL;
    const char *opt_overlap_check, *opt_overlap_check_template;
    int overlap_check_template = 0;
    uint64_t l2_cache_size, refcount_cache_size;
    int i;
    Error *local_err = NULL;
    int ret;

    opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* get L2 table/refcount block cache size from command line options */
    read_cache_sizes(bs, opts, &l2_cache_size, &refcount_cache_size,
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    l2_cache_size /= s->cluster_size;
    if (l2_cache_size < MIN_L2_CACHE_SIZE) {
        l2_cache_size = MIN_L2_CACHE_SIZE;
    }
    if (l2_cache_size > INT_MAX) {
        error_setg(errp, "L2 cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    refcount_cache_size /= s->cluster_size;
    if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
        refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
    }
    if (refcount_cache_size > INT_MAX) {
        error_setg(errp, "Refcount cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    /* alloc new L2 table/refcount block cache, flush old one */
    if (s->l2_table_cache) {
        ret = qcow2_cache_flush(bs, s->l2_table_cache);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
            goto fail;
        }
    }

    if (s->refcount_block_cache) {
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "Failed to flush the refcount block cache");
            goto fail;
        }
    }

    r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size);
    r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size);
    if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
        error_setg(errp, "Could not allocate metadata caches");
        ret = -ENOMEM;
        goto fail;
    }

    /* New interval for cache cleanup timer */
    r->cache_clean_interval =
        qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
                            s->cache_clean_interval);
#ifndef CONFIG_LINUX
    if (r->cache_clean_interval != 0) {
        error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
                   " not supported on this host");
        ret = -EINVAL;
        goto fail;
    }
#endif

    if (r->cache_clean_interval > UINT_MAX) {
        error_setg(errp, "Cache clean interval too big");
        ret = -EINVAL;
        goto fail;
    }

    /* lazy-refcounts; flush if going from enabled to disabled */
    r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
    if (r->use_lazy_refcounts && s->qcow_version < 3) {
        error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
                   "qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

    if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
            goto fail;
        }
    }

    /* Overlap check options */
    opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
    opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
    if (opt_overlap_check_template && opt_overlap_check &&
        strcmp(opt_overlap_check_template, opt_overlap_check))
    {
        error_setg(errp, "Conflicting values for qcow2 options '"
                   QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
                   "' ('%s')", opt_overlap_check, opt_overlap_check_template);
        ret = -EINVAL;
        goto fail;
    }

    if (!opt_overlap_check) {
        opt_overlap_check = opt_overlap_check_template ?: "cached";
    }

    if (!strcmp(opt_overlap_check, "none")) {
        overlap_check_template = 0;
    } else if (!strcmp(opt_overlap_check, "constant")) {
        overlap_check_template = QCOW2_OL_CONSTANT;
    } else if (!strcmp(opt_overlap_check, "cached")) {
        overlap_check_template = QCOW2_OL_CACHED;
    } else if (!strcmp(opt_overlap_check, "all")) {
        overlap_check_template = QCOW2_OL_ALL;
    } else {
        error_setg(errp, "Unsupported value '%s' for qcow2 option "
                   "'overlap-check'. Allowed are any of the following: "
                   "none, constant, cached, all", opt_overlap_check);
        ret = -EINVAL;
        goto fail;
    }

    r->overlap_check = 0;
    for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
        /* overlap-check defines a template bitmask, but every flag may be
         * overwritten through the associated boolean option */
        r->overlap_check |=
            qemu_opt_get_bool(opts, overlap_bool_option_names[i],
                              overlap_check_template & (1 << i)) << i;
    }

    r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
    r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
    r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
                          flags & BDRV_O_UNMAP);
    r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
    r->discard_passthrough[QCOW2_DISCARD_OTHER] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);

    ret = 0;
fail:
    qemu_opts_del(opts);
    opts = NULL;
    return ret;
}
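
/* Example of how the overlap-check options combine: overlap-check=constant
 * selects the QCOW2_OL_CONSTANT template bits, and an individual boolean
 * option for one of the entries in overlap_bool_option_names[] then overrides
 * just that one bit, because the loop above re-evaluates every bit of the
 * template through its associated boolean option. */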

static void qcow2_update_options_commit(BlockDriverState *bs,
                                        Qcow2ReopenState *r)
{
    BDRVQcow2State *s = bs->opaque;
    int i;

    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    if (s->refcount_block_cache) {
        qcow2_cache_destroy(bs, s->refcount_block_cache);
    }
    s->l2_table_cache = r->l2_table_cache;
    s->refcount_block_cache = r->refcount_block_cache;

    s->overlap_check = r->overlap_check;
    s->use_lazy_refcounts = r->use_lazy_refcounts;

    for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
        s->discard_passthrough[i] = r->discard_passthrough[i];
    }

    if (s->cache_clean_interval != r->cache_clean_interval) {
        cache_clean_timer_del(bs);
        s->cache_clean_interval = r->cache_clean_interval;
        cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
    }
}

static void qcow2_update_options_abort(BlockDriverState *bs,
                                       Qcow2ReopenState *r)
{
    if (r->l2_table_cache) {
        qcow2_cache_destroy(bs, r->l2_table_cache);
    }
    if (r->refcount_block_cache) {
        qcow2_cache_destroy(bs, r->refcount_block_cache);
    }
}

static int qcow2_update_options(BlockDriverState *bs, QDict *options,
                                int flags, Error **errp)
{
    Qcow2ReopenState r = {};
    int ret;

    ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
    if (ret >= 0) {
        qcow2_update_options_commit(bs, &r);
    } else {
        qcow2_update_options_abort(bs, &r);
    }

    return ret;
}

static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int len, i;
    int ret = 0;
    QCowHeader header;
    Error *local_err = NULL;
    uint64_t ext_end;
    uint64_t l1_vm_state_index;

    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read qcow2 header");
        goto fail;
    }
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        error_setg(errp, "Image is not in qcow2 format");
        ret = -EINVAL;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    s->qcow_version = header.version;

    /* Initialise cluster size */
    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
                   header.cluster_bits);
        ret = -EINVAL;
        goto fail;
    }

    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
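
    /* For example, the default cluster_bits of 16 gives a 64 KiB cluster
     * (1 << 16) made up of 128 512-byte sectors (1 << (16 - 9)). */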

    /* Initialise version 3 header fields */
    if (header.version == 2) {
        header.incompatible_features    = 0;
        header.compatible_features      = 0;
        header.autoclear_features       = 0;
        header.refcount_order           = 4;
        header.header_length            = 72;
    } else {
        be64_to_cpus(&header.incompatible_features);
        be64_to_cpus(&header.compatible_features);
        be64_to_cpus(&header.autoclear_features);
        be32_to_cpus(&header.refcount_order);
        be32_to_cpus(&header.header_length);

        if (header.header_length < 104) {
            error_setg(errp, "qcow2 header too short");
            ret = -EINVAL;
            goto fail;
        }
    }

    if (header.header_length > s->cluster_size) {
        error_setg(errp, "qcow2 header exceeds cluster size");
        ret = -EINVAL;
        goto fail;
    }

    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
                         s->unknown_header_fields_size);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
                             "fields");
            goto fail;
        }
    }

    if (header.backing_file_offset > s->cluster_size) {
        error_setg(errp, "Invalid backing file offset");
        ret = -EINVAL;
        goto fail;
    }

    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    /* Handle feature bits */
    s->incompatible_features    = header.incompatible_features;
    s->compatible_features      = header.compatible_features;
    s->autoclear_features       = header.autoclear_features;

    if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table, NULL);
        report_unsupported_feature(errp, feature_table,
                                   s->incompatible_features &
                                   ~QCOW2_INCOMPAT_MASK);
        ret = -ENOTSUP;
        g_free(feature_table);
        goto fail;
    }

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        /* Corrupt images may not be written to unless they are being repaired
         */
        if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
            error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
                       "read/write");
            ret = -EACCES;
            goto fail;
        }
    }

    /* Check support for various header values */
    if (header.refcount_order > 6) {
        error_setg(errp, "Reference count entry width too large; may not "
                   "exceed 64 bits");
        ret = -EINVAL;
        goto fail;
    }
    s->refcount_order = header.refcount_order;
    s->refcount_bits = 1 << s->refcount_order;
    s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
    s->refcount_max += s->refcount_max - 1;
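
    /* Worked example: refcount_order 4 (the default) gives refcount_bits 16
     * and refcount_max 0xffff.  The maximum is built as 2^(bits-1) plus
     * (2^(bits-1) - 1), i.e. 2^bits - 1, which avoids the out-of-range
     * 64-bit shift that a direct "1 << refcount_bits" would perform when
     * refcount_bits is 64. */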

    if (header.crypt_method > QCOW_CRYPT_AES) {
        error_setg(errp, "Unsupported encryption method: %" PRIu32,
                   header.crypt_method);
        ret = -EINVAL;
        goto fail;
    }
    if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALG_AES_128,
                                 QCRYPTO_CIPHER_MODE_CBC)) {
        error_setg(errp, "AES cipher not available");
        ret = -EINVAL;
        goto fail;
    }
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        if (bdrv_uses_whitelist() &&
            s->crypt_method_header == QCOW_CRYPT_AES) {
            error_setg(errp,
                       "Use of AES-CBC encrypted qcow2 images is no longer "
                       "supported in system emulators");
            error_append_hint(errp,
                              "You can use 'qemu-img convert' to convert your "
                              "image to an alternative supported format, such "
                              "as unencrypted qcow2, or raw with the LUKS "
                              "format instead.\n");
            ret = -ENOSYS;
            goto fail;
        }

        bs->encrypted = true;
    }

    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    /* 2^(s->refcount_order - 3) is the refcount width in bytes */
    s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
    s->refcount_block_size = 1 << s->refcount_block_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
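
    /* Illustration: with 64 KiB clusters (cluster_bits = 16), csize_shift is
     * 62 - 8 = 54, so a compressed cluster descriptor keeps the host offset
     * in its low 54 bits (cluster_offset_mask) and the compressed size,
     * counted in 512-byte sectors, in the bits above it (csize_mask = 0xff). */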

    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) {
        error_setg(errp, "Reference count table too large");
        ret = -EINVAL;
        goto fail;
    }

    ret = validate_table_offset(bs, s->refcount_table_offset,
                                s->refcount_table_size, sizeof(uint64_t));
    if (ret < 0) {
        error_setg(errp, "Invalid reference count table offset");
        goto fail;
    }

    /* Snapshot table offset/length */
    if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {
        error_setg(errp, "Too many snapshots");
        ret = -EINVAL;
        goto fail;
    }

    ret = validate_table_offset(bs, header.snapshots_offset,
                                header.nb_snapshots,
                                sizeof(QCowSnapshotHeader));
    if (ret < 0) {
        error_setg(errp, "Invalid snapshot table offset");
        goto fail;
    }

    /* read the level 1 table */
    if (header.l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        error_setg(errp, "Active L1 table too large");
        ret = -EFBIG;
        goto fail;
    }
    s->l1_size = header.l1_size;

    l1_vm_state_index = size_to_l1(s, header.size);
    if (l1_vm_state_index > INT_MAX) {
        error_setg(errp, "Image is too big");
        ret = -EFBIG;
        goto fail;
    }
    s->l1_vm_state_index = l1_vm_state_index;

    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        error_setg(errp, "L1 table is too small");
        ret = -EINVAL;
        goto fail;
    }

    ret = validate_table_offset(bs, header.l1_table_offset,
                                header.l1_size, sizeof(uint64_t));
    if (ret < 0) {
        error_setg(errp, "Invalid L1 table offset");
        goto fail;
    }
    s->l1_table_offset = header.l1_table_offset;

    if (s->l1_size > 0) {
        s->l1_table = qemu_try_blockalign(bs->file->bs,
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        if (s->l1_table == NULL) {
            error_setg(errp, "Could not allocate L1 table");
            ret = -ENOMEM;
            goto fail;
        }
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read L1 table");
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }

    /* Parse driver-specific options */
    ret = qcow2_update_options(bs, options, flags, errp);
    if (ret < 0) {
        goto fail;
    }

    s->cluster_cache = g_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS
                                                        * s->cluster_size + 512);
    if (s->cluster_data == NULL) {
        error_setg(errp, "Could not allocate temporary cluster buffer");
        ret = -ENOMEM;
        goto fail;
    }

    s->cluster_cache_offset = -1;
    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Could not initialize refcount handling");
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);
    QTAILQ_INIT(&s->discards);

    /* read qcow2 extensions */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
                              &local_err)) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
            len >= sizeof(bs->backing_file)) {
            error_setg(errp, "Backing file name too long");
            ret = -EINVAL;
            goto fail;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->backing_file, len);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read backing file name");
            goto fail;
        }
        bs->backing_file[len] = '\0';
        s->image_backing_file = g_strdup(bs->backing_file);
    }

    /* Internal snapshots */
    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read snapshots");
        goto fail;
    }

    /* Clear unknown autoclear feature bits */
    if (!bs->read_only && !(flags & BDRV_O_INACTIVE) && s->autoclear_features) {
        s->autoclear_features = 0;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not update qcow2 header");
            goto fail;
        }
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);
    bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP;

    /* Repair image if dirty */
    if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only &&
        (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
        BdrvCheckResult result = {0};

        ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not repair dirty image");
            goto fail;
        }
    }

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result, 0);
    }
#endif
    return ret;

 fail:
    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    qemu_vfree(s->l1_table);
    /* else pre-write overlap checks in cache_destroy may crash */
    s->l1_table = NULL;
    cache_clean_timer_del(bs);
    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    if (s->refcount_block_cache) {
        qcow2_cache_destroy(bs, s->refcount_block_cache);
    }
    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    return ret;
}

static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    if (bs->encrypted) {
        /* Encryption works on a sector granularity */
        bs->bl.request_alignment = BDRV_SECTOR_SIZE;
    }
    bs->bl.pwrite_zeroes_alignment = s->cluster_size;
    bs->bl.pdiscard_alignment = s->cluster_size;
}

static int qcow2_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcow2State *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;
    Error *err = NULL;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16) {
        len = 16;
    }
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for (i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    assert(bs->encrypted);

    qcrypto_cipher_free(s->cipher);
    s->cipher = qcrypto_cipher_new(
        QCRYPTO_CIPHER_ALG_AES_128,
        QCRYPTO_CIPHER_MODE_CBC,
        keybuf, G_N_ELEMENTS(keybuf),
        &err);

    if (!s->cipher) {
        /* XXX would be nice if errors in this method could
         * be properly propagate to the caller. Would need
         * the bdrv_set_key() API signature to be fixed. */
        error_free(err);
        return -1;
    }
    return 0;
}

static int qcow2_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    Qcow2ReopenState *r;
    int ret;

    r = g_new0(Qcow2ReopenState, 1);
    state->opaque = r;

    ret = qcow2_update_options_prepare(state->bs, r, state->options,
                                       state->flags, errp);
    if (ret < 0) {
        goto fail;
    }

    /* We need to write out any unwritten data if we reopen read-only. */
    if ((state->flags & BDRV_O_RDWR) == 0) {
        ret = bdrv_flush(state->bs);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_mark_clean(state->bs);
        if (ret < 0) {
            goto fail;
        }
    }

    return 0;

fail:
    qcow2_update_options_abort(state->bs, r);
    g_free(r);
    return ret;
}

static void qcow2_reopen_commit(BDRVReopenState *state)
{
    qcow2_update_options_commit(state->bs, state->opaque);
    g_free(state->opaque);
}

static void qcow2_reopen_abort(BDRVReopenState *state)
{
    qcow2_update_options_abort(state->bs, state->opaque);
    g_free(state->opaque);
}

static void qcow2_join_options(QDict *options, QDict *old_options)
{
    bool has_new_overlap_template =
        qdict_haskey(options, QCOW2_OPT_OVERLAP) ||
        qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE);
    bool has_new_total_cache_size =
        qdict_haskey(options, QCOW2_OPT_CACHE_SIZE);
    bool has_all_cache_options;

    /* New overlap template overrides all old overlap options */
    if (has_new_overlap_template) {
        qdict_del(old_options, QCOW2_OPT_OVERLAP);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1);
        qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2);
    }

    /* New total cache size overrides all old options */
    if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) {
        qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE);
        qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
    }

    qdict_join(options, old_options, false);

    /*
     * If after merging all cache size options are set, an old total size is
     * overwritten. Do keep all options, however, if all three are new. The
     * resulting error message is what we want to happen.
     */
    has_all_cache_options =
        qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) ||
        qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) ||
        qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);

    if (has_all_cache_options && !has_new_total_cache_size) {
        qdict_del(options, QCOW2_OPT_CACHE_SIZE);
    }
}

static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t cluster_offset;
    int index_in_cluster, ret;
    unsigned int bytes;
    int64_t status = 0;

    bytes = MIN(INT_MAX, nb_sectors * BDRV_SECTOR_SIZE);
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_get_cluster_offset(bs, sector_num << 9, &bytes,
                                   &cluster_offset);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        return ret;
    }

    *pnum = bytes >> BDRV_SECTOR_BITS;

    if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED &&
        !s->cipher) {
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS);
        *file = bs->file->bs;
        status |= BDRV_BLOCK_OFFSET_VALID | cluster_offset;
    }
    if (ret == QCOW2_CLUSTER_ZERO) {
        status |= BDRV_BLOCK_ZERO;
    } else if (ret != QCOW2_CLUSTER_UNALLOCATED) {
        status |= BDRV_BLOCK_DATA;
    }
    return status;
}
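
/* Example of the returned value: an allocated, uncompressed, unencrypted
 * cluster yields BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID together with the
 * host offset of the requested sector, a zero cluster yields BDRV_BLOCK_ZERO,
 * and an unallocated cluster sets neither DATA nor ZERO so the caller falls
 * through to the backing file. */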

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
                        int64_t offset, int bytes)
{
    uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    int n1;

    if ((offset + bytes) <= bs_size) {
        return bytes;
    }

    if (offset >= bs_size) {
        n1 = 0;
    } else {
        n1 = bs_size - offset;
    }

    qemu_iovec_memset(qiov, n1, 0, bytes - n1);

    return n1;
}
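
/* Example: with a 1 MiB backing file (bs_size = 1048576), a 4096-byte read at
 * offset 1046528 returns n1 = 2048; the first 2048 bytes come from the
 * backing file and the remaining 2048 bytes of the iovec are zero-filled
 * above. */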

static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
                                        uint64_t bytes, QEMUIOVector *qiov,
                                        int flags)
{
    BDRVQcow2State *s = bs->opaque;
    int offset_in_cluster, n1;
    int ret;
    unsigned int cur_bytes; /* number of bytes in current iteration */
    uint64_t cluster_offset = 0;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    uint8_t *cluster_data = NULL;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (bytes != 0) {

        /* prepare next request */
        cur_bytes = MIN(bytes, INT_MAX);
        if (s->cipher) {
            cur_bytes = MIN(cur_bytes,
                            QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
        }

        ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset);
        if (ret < 0) {
            goto fail;
        }

        offset_in_cluster = offset_into_cluster(s, offset);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);

        switch (ret) {
        case QCOW2_CLUSTER_UNALLOCATED:

            if (bs->backing) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov,
                                         offset, cur_bytes);
                if (n1 > 0) {
                    QEMUIOVector local_qiov;

                    qemu_iovec_init(&local_qiov, hd_qiov.niov);
                    qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1);

                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                    qemu_co_mutex_unlock(&s->lock);
                    ret = bdrv_co_preadv(bs->backing, offset, n1,
                                         &local_qiov, 0);
                    qemu_co_mutex_lock(&s->lock);

                    qemu_iovec_destroy(&local_qiov);

                    if (ret < 0) {
                        goto fail;
                    }
                }
            } else {
                /* Note: in this case, no need to wait */
                qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes);
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes);
            break;

        case QCOW2_CLUSTER_COMPRESSED:
            /* add AIO support for compressed blocks ? */
            ret = qcow2_decompress_cluster(bs, cluster_offset);
            if (ret < 0) {
                goto fail;
            }

            qemu_iovec_from_buf(&hd_qiov, 0,
                                s->cluster_cache + offset_in_cluster,
                                cur_bytes);
            break;

        case QCOW2_CLUSTER_NORMAL:
            if ((cluster_offset & 511) != 0) {
                ret = -EIO;
                goto fail;
            }

            if (bs->encrypted) {
                assert(s->cipher);

                /*
                 * For encrypted images, read everything into a temporary
                 * contiguous buffer on which the AES functions can work.
                 */
                if (!cluster_data) {
                    cluster_data =
                        qemu_try_blockalign(bs->file->bs,
                                            QCOW_MAX_CRYPT_CLUSTERS
                                            * s->cluster_size);
                    if (cluster_data == NULL) {
                        ret = -ENOMEM;
                        goto fail;
                    }
                }

                assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
            }

            BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
            qemu_co_mutex_unlock(&s->lock);
            ret = bdrv_co_preadv(bs->file,
                                 cluster_offset + offset_in_cluster,
                                 cur_bytes, &hd_qiov, 0);
            qemu_co_mutex_lock(&s->lock);
            if (ret < 0) {
                goto fail;
            }
            if (bs->encrypted) {
                assert(s->cipher);
                assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
                assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
                if (qcow2_encrypt_sectors(s, offset >> BDRV_SECTOR_BITS,
                                          cluster_data, cluster_data,
                                          cur_bytes >> BDRV_SECTOR_BITS,
                                          false, NULL) < 0) {
                    ret = -EIO;
                    goto fail;
                }
                qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes);
            }
            break;

        default:
            g_assert_not_reached();
            ret = -EIO;
            goto fail;
        }

        bytes -= cur_bytes;
        offset += cur_bytes;
        bytes_done += cur_bytes;
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);

    return ret;
}

static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
                                         uint64_t bytes, QEMUIOVector *qiov,
                                         int flags)
{
    BDRVQcow2State *s = bs->opaque;
    int offset_in_cluster;
    int ret;
    unsigned int cur_bytes; /* number of bytes in current iteration */
    uint64_t cluster_offset;
    QEMUIOVector hd_qiov;
    uint64_t bytes_done = 0;
    uint8_t *cluster_data = NULL;
    QCowL2Meta *l2meta = NULL;

    trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);

    qemu_iovec_init(&hd_qiov, qiov->niov);

    s->cluster_cache_offset = -1; /* disable compressed cache */

    qemu_co_mutex_lock(&s->lock);

    while (bytes != 0) {

        l2meta = NULL;

        trace_qcow2_writev_start_part(qemu_coroutine_self());
        offset_in_cluster = offset_into_cluster(s, offset);
        cur_bytes = MIN(bytes, INT_MAX);
        if (bs->encrypted) {
            cur_bytes = MIN(cur_bytes,
                            QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                            - offset_in_cluster);
        }

        ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
                                         &cluster_offset, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        assert((cluster_offset & 511) == 0);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);

        if (bs->encrypted) {
            assert(s->cipher);
            if (!cluster_data) {
                cluster_data = qemu_try_blockalign(bs->file->bs,
                                                   QCOW_MAX_CRYPT_CLUSTERS
                                                   * s->cluster_size);
                if (cluster_data == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }

            assert(hd_qiov.size <=
                   QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
            qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);

            if (qcow2_encrypt_sectors(s, offset >> BDRV_SECTOR_BITS,
                                      cluster_data, cluster_data,
                                      cur_bytes >> BDRV_SECTOR_BITS,
                                      true, NULL) < 0) {
                ret = -EIO;
                goto fail;
            }

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
        }

        ret = qcow2_pre_write_overlap_check(bs, 0,
                cluster_offset + offset_in_cluster, cur_bytes);
        if (ret < 0) {
            goto fail;
        }

        qemu_co_mutex_unlock(&s->lock);
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        trace_qcow2_writev_data(qemu_coroutine_self(),
                                cluster_offset + offset_in_cluster);
        ret = bdrv_co_pwritev(bs->file,
                              cluster_offset + offset_in_cluster,
                              cur_bytes, &hd_qiov, 0);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto fail;
        }

        while (l2meta != NULL) {
            QCowL2Meta *next;

            ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
            if (ret < 0) {
                goto fail;
            }

            /* Take the request off the list of running requests */
            if (l2meta->nb_clusters != 0) {
                QLIST_REMOVE(l2meta, next_in_flight);
            }

            qemu_co_queue_restart_all(&l2meta->dependent_requests);

            next = l2meta->next;
            g_free(l2meta);
            l2meta = next;
        }

        bytes -= cur_bytes;
        offset += cur_bytes;
        bytes_done += cur_bytes;
        trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    while (l2meta != NULL) {
        QCowL2Meta *next;

        if (l2meta->nb_clusters != 0) {
            QLIST_REMOVE(l2meta, next_in_flight);
        }
        qemu_co_queue_restart_all(&l2meta->dependent_requests);

        next = l2meta->next;
        g_free(l2meta);
        l2meta = next;
    }

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);
    trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);

    return ret;
}

static int qcow2_inactivate(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, result = 0;

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret) {
        result = ret;
        error_report("Failed to flush the L2 table cache: %s",
                     strerror(-ret));
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret) {
        result = ret;
        error_report("Failed to flush the refcount block cache: %s",
                     strerror(-ret));
    }

    if (result == 0) {
        qcow2_mark_clean(bs);
    }

    return result;
}

static void qcow2_close(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    qemu_vfree(s->l1_table);
    /* else pre-write overlap checks in cache_destroy may crash */
    s->l1_table = NULL;

    if (!(s->flags & BDRV_O_INACTIVE)) {
        qcow2_inactivate(bs);
    }

    cache_clean_timer_del(bs);
    qcow2_cache_destroy(bs, s->l2_table_cache);
    qcow2_cache_destroy(bs, s->refcount_block_cache);

    qcrypto_cipher_free(s->cipher);
    s->cipher = NULL;

    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);

    g_free(s->image_backing_file);
    g_free(s->image_backing_format);

    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    qcow2_refcount_close(bs);
    qcow2_free_snapshots(bs);
}

static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int flags = s->flags;
    QCryptoCipher *cipher = NULL;
    QDict *options;
    Error *local_err = NULL;
    int ret;

    /*
     * Backing files are read-only which makes all of their metadata immutable,
     * that means we don't have to worry about reopening them here.
     */

    cipher = s->cipher;
    s->cipher = NULL;

    qcow2_close(bs);

    memset(s, 0, sizeof(BDRVQcow2State));
    options = qdict_clone_shallow(bs->options);

    flags &= ~BDRV_O_INACTIVE;
    ret = qcow2_open(bs, options, flags, &local_err);
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qcow2 layer: ");
        bs->drv = NULL;
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qcow2 layer");
        bs->drv = NULL;
        return;
    }

    s->cipher = cipher;
}

static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
                             size_t len, size_t buflen)
{
    QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
    size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);

    if (buflen < ext_len) {
        return -ENOSPC;
    }

    *ext_backing_fmt = (QCowExtension) {
        .magic  = cpu_to_be32(magic),
        .len    = cpu_to_be32(len),
    };
    memcpy(buf + sizeof(QCowExtension), s, len);

    return ext_len;
}
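
/* Layout example: adding a backing-format extension with an 11-byte format
 * name writes the 8-byte QCowExtension header followed by the name padded to
 * 16 bytes, so header_ext_add() reports 24 bytes consumed and the next
 * extension again starts 8-byte aligned. */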

/*
 * Updates the qcow2 header, including the variable length parts of it, i.e.
 * the backing file name and all extensions. qcow2 was not designed to allow
 * such changes, so if we run out of space (we can only use the first cluster)
 * this function may fail.
 *
 * Returns 0 on success, -errno in error cases.
 */
int qcow2_update_header(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    QCowHeader *header;
    char *buf;
    size_t buflen = s->cluster_size;
    int ret;
    uint64_t total_size;
    uint32_t refcount_table_clusters;
    size_t header_length;
    Qcow2UnknownHeaderExtension *uext;

    buf = qemu_blockalign(bs, buflen);

    /* Header structure */
    header = (QCowHeader*) buf;

    if (buflen < sizeof(*header)) {
        ret = -ENOSPC;
        goto fail;
    }

    header_length = sizeof(*header) + s->unknown_header_fields_size;
    total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);

    *header = (QCowHeader) {
        /* Version 2 fields */
        .magic                  = cpu_to_be32(QCOW_MAGIC),
        .version                = cpu_to_be32(s->qcow_version),
        .backing_file_offset    = 0,
        .backing_file_size      = 0,
        .cluster_bits           = cpu_to_be32(s->cluster_bits),
        .size                   = cpu_to_be64(total_size),
        .crypt_method           = cpu_to_be32(s->crypt_method_header),
        .l1_size                = cpu_to_be32(s->l1_size),
        .l1_table_offset        = cpu_to_be64(s->l1_table_offset),
        .refcount_table_offset  = cpu_to_be64(s->refcount_table_offset),
        .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
        .nb_snapshots           = cpu_to_be32(s->nb_snapshots),
        .snapshots_offset       = cpu_to_be64(s->snapshots_offset),

        /* Version 3 fields */
        .incompatible_features  = cpu_to_be64(s->incompatible_features),
        .compatible_features    = cpu_to_be64(s->compatible_features),
        .autoclear_features     = cpu_to_be64(s->autoclear_features),
        .refcount_order         = cpu_to_be32(s->refcount_order),
        .header_length          = cpu_to_be32(header_length),
    };

    /* For older versions, write a shorter header */
    switch (s->qcow_version) {
    case 2:
        ret = offsetof(QCowHeader, incompatible_features);
        break;
    case 3:
        ret = sizeof(*header);
        break;
    default:
        ret = -EINVAL;
        goto fail;
    }

    buf += ret;
    buflen -= ret;
    memset(buf, 0, buflen);

    /* Preserve any unknown field in the header */
    if (s->unknown_header_fields_size) {
        if (buflen < s->unknown_header_fields_size) {
            ret = -ENOSPC;
            goto fail;
        }

        memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
        buf += s->unknown_header_fields_size;
        buflen -= s->unknown_header_fields_size;
    }

    /* Backing file format header extension */
    if (s->image_backing_format) {
        ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
                             s->image_backing_format,
                             strlen(s->image_backing_format),
                             buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* Feature table */
    if (s->qcow_version >= 3) {
        Qcow2Feature features[] = {
            {
                .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
                .bit  = QCOW2_INCOMPAT_DIRTY_BITNR,
                .name = "dirty bit",
            },
            {
                .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
                .bit  = QCOW2_INCOMPAT_CORRUPT_BITNR,
                .name = "corrupt bit",
            },
            {
                .type = QCOW2_FEAT_TYPE_COMPATIBLE,
                .bit  = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
                .name = "lazy refcounts",
            },
        };

        ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
                             features, sizeof(features), buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* Keep unknown header extensions */
    QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
        ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* End of header extensions */
    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
    if (ret < 0) {
        goto fail;
    }

    buf += ret;
    buflen -= ret;

    /* Backing file name */
    if (s->image_backing_file) {
        size_t backing_file_len = strlen(s->image_backing_file);

        if (buflen < backing_file_len) {
            ret = -ENOSPC;
            goto fail;
        }

        /* Using strncpy is ok here, since buf is not NUL-terminated. */
        strncpy(buf, s->image_backing_file, buflen);

        header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
        header->backing_file_size   = cpu_to_be32(backing_file_len);
    }

    /* Write the new header */
    ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    qemu_vfree(header);
    return ret;
}

static int qcow2_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BDRVQcow2State *s = bs->opaque;

    if (backing_file && strlen(backing_file) > 1023) {
        return -EINVAL;
    }

    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
    pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");

    g_free(s->image_backing_file);
    g_free(s->image_backing_format);

    s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL;
    s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL;

    return qcow2_update_header(bs);
}

static int preallocate(BlockDriverState *bs)
{
    uint64_t bytes;
    uint64_t offset;
    uint64_t host_offset = 0;
    unsigned int cur_bytes;
    int ret;
    QCowL2Meta *meta;

    bytes = bdrv_getlength(bs);
    offset = 0;

    while (bytes) {
        cur_bytes = MIN(bytes, INT_MAX);
        ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
                                         &host_offset, &meta);
        if (ret < 0) {
            return ret;
        }

        while (meta) {
            QCowL2Meta *next = meta->next;

            ret = qcow2_alloc_cluster_link_l2(bs, meta);
            if (ret < 0) {
                qcow2_free_any_clusters(bs, meta->alloc_offset,
                                        meta->nb_clusters, QCOW2_DISCARD_NEVER);
                return ret;
            }

            /* There are no dependent requests, but we need to remove our
             * request from the list of in-flight requests */
            QLIST_REMOVE(meta, next_in_flight);

            g_free(meta);
            meta = next;
        }

        /* TODO Preallocate data if requested */

        bytes -= cur_bytes;
        offset += cur_bytes;
    }

    /*
     * It is expected that the image file is large enough to actually contain
     * all of the allocated clusters (otherwise we get failing reads after
     * EOF). Extend the image to the last allocated sector.
     */
    if (host_offset != 0) {
        uint8_t data = 0;
        ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1,
                          &data, 1);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
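
/* Example of the final write above: for a 10 MiB image with 64 KiB clusters
 * the loop allocates 160 clusters, and writing a single zero byte at
 * host_offset + cur_bytes - 1 forces the underlying file to grow so that the
 * last allocated cluster is actually backed by the image file. */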
2086 static int qcow2_create2(const char *filename
, int64_t total_size
,
2087 const char *backing_file
, const char *backing_format
,
2088 int flags
, size_t cluster_size
, PreallocMode prealloc
,
2089 QemuOpts
*opts
, int version
, int refcount_order
,
2095 /* Calculate cluster_bits */
2096 cluster_bits
= ctz32(cluster_size
);
2097 if (cluster_bits
< MIN_CLUSTER_BITS
|| cluster_bits
> MAX_CLUSTER_BITS
||
2098 (1 << cluster_bits
) != cluster_size
)
2100 error_setg(errp
, "Cluster size must be a power of two between %d and "
2101 "%dk", 1 << MIN_CLUSTER_BITS
, 1 << (MAX_CLUSTER_BITS
- 10));
2106 * Open the image file and write a minimal qcow2 header.
2108 * We keep things simple and start with a zero-sized image. We also
2109 * do without refcount blocks or a L1 table for now. We'll fix the
2110 * inconsistency later.
2112 * We do need a refcount table because growing the refcount table means
2113 * allocating two new refcount blocks - the seconds of which would be at
2114 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
2115 * size for any qcow2 image.
2119 uint64_t* refcount_table
;
2120 Error
*local_err
= NULL
;
2123 if (prealloc
== PREALLOC_MODE_FULL
|| prealloc
== PREALLOC_MODE_FALLOC
) {
2124 /* Note: The following calculation does not need to be exact; if it is a
2125 * bit off, either some bytes will be "leaked" (which is fine) or we
2126 * will need to increase the file size by some bytes (which is fine,
2127 * too, as long as the bulk is allocated here). Therefore, using
2128 * floating point arithmetic is fine. */
2129 int64_t meta_size
= 0;
2130 uint64_t nreftablee
, nrefblocke
, nl1e
, nl2e
;
2131 int64_t aligned_total_size
= align_offset(total_size
, cluster_size
);
2132 int refblock_bits
, refblock_size
;
2133 /* refcount entry size in bytes */
2134 double rces
= (1 << refcount_order
) / 8.;
2136 /* see qcow2_open() */
2137 refblock_bits
= cluster_bits
- (refcount_order
- 3);
2138 refblock_size
= 1 << refblock_bits
;
2140 /* header: 1 cluster */
2141 meta_size
+= cluster_size
;
2143 /* total size of L2 tables */
2144 nl2e
= aligned_total_size
/ cluster_size
;
2145 nl2e
= align_offset(nl2e
, cluster_size
/ sizeof(uint64_t));
2146 meta_size
+= nl2e
* sizeof(uint64_t);
2148 /* total size of L1 tables */
2149 nl1e
= nl2e
* sizeof(uint64_t) / cluster_size
;
2150 nl1e
= align_offset(nl1e
, cluster_size
/ sizeof(uint64_t));
2151 meta_size
+= nl1e
* sizeof(uint64_t);
2153 /* total size of refcount blocks
2155 * note: every host cluster is reference-counted, including metadata
2156 * (even refcount blocks are recursively included).
2158 * a = total_size (this is the guest disk size)
2159 * m = meta size not including refcount blocks and refcount tables
2161 * y1 = number of refcount blocks entries
2162 * y2 = meta size including everything
2163 * rces = refcount entry size in bytes
2166 * y2 = y1 * rces + y1 * rces * sizeof(u64) / c + m
2168 * y1 = (a + m) / (c - rces - rces * sizeof(u64) / c)
2170 nrefblocke
= (aligned_total_size
+ meta_size
+ cluster_size
)
2171 / (cluster_size
- rces
- rces
* sizeof(uint64_t)
2173 meta_size
+= DIV_ROUND_UP(nrefblocke
, refblock_size
) * cluster_size
;
2175 /* total size of refcount tables */
2176 nreftablee
= nrefblocke
/ refblock_size
;
2177 nreftablee
= align_offset(nreftablee
, cluster_size
/ sizeof(uint64_t));
2178 meta_size
+= nreftablee
* sizeof(uint64_t);
2180 qemu_opt_set_number(opts
, BLOCK_OPT_SIZE
,
2181 aligned_total_size
+ meta_size
, &error_abort
);
2182 qemu_opt_set(opts
, BLOCK_OPT_PREALLOC
, PreallocMode_lookup
[prealloc
],
2186 ret
= bdrv_create_file(filename
, opts
, &local_err
);
2188 error_propagate(errp
, local_err
);
2192 blk
= blk_new_open(filename
, NULL
, NULL
,
2193 BDRV_O_RDWR
| BDRV_O_PROTOCOL
, &local_err
);
2195 error_propagate(errp
, local_err
);
2199 blk_set_allow_write_beyond_eof(blk
, true);
2201 /* Write the header */
2202 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS
) < sizeof(*header
));
2203 header
= g_malloc0(cluster_size
);
2204 *header
= (QCowHeader
) {
2205 .magic
= cpu_to_be32(QCOW_MAGIC
),
2206 .version
= cpu_to_be32(version
),
2207 .cluster_bits
= cpu_to_be32(cluster_bits
),
2208 .size
= cpu_to_be64(0),
2209 .l1_table_offset
= cpu_to_be64(0),
2210 .l1_size
= cpu_to_be32(0),
2211 .refcount_table_offset
= cpu_to_be64(cluster_size
),
2212 .refcount_table_clusters
= cpu_to_be32(1),
2213 .refcount_order
= cpu_to_be32(refcount_order
),
2214 .header_length
= cpu_to_be32(sizeof(*header
)),
2217 if (flags
& BLOCK_FLAG_ENCRYPT
) {
2218 header
->crypt_method
= cpu_to_be32(QCOW_CRYPT_AES
);
2220 header
->crypt_method
= cpu_to_be32(QCOW_CRYPT_NONE
);
2223 if (flags
& BLOCK_FLAG_LAZY_REFCOUNTS
) {
2224 header
->compatible_features
|=
2225 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS
);
2228 ret
= blk_pwrite(blk
, 0, header
, cluster_size
, 0);
2231 error_setg_errno(errp
, -ret
, "Could not write qcow2 header");
2235 /* Write a refcount table with one refcount block */
2236 refcount_table
= g_malloc0(2 * cluster_size
);
2237 refcount_table
[0] = cpu_to_be64(2 * cluster_size
);
2238 ret
= blk_pwrite(blk
, cluster_size
, refcount_table
, 2 * cluster_size
, 0);
2239 g_free(refcount_table
);
2242 error_setg_errno(errp
, -ret
, "Could not write refcount table");
    /*
     * And now open the image and make it consistent first (i.e. increase the
     * refcount of the cluster that is occupied by the header and the refcount
     * table)
     */
    options = qdict_new();
    qdict_put(options, "driver", qstring_from_str("qcow2"));
    blk = blk_new_open(filename, NULL, options,
                       BDRV_O_RDWR | BDRV_O_NO_FLUSH, &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
                         "header and refcount table");
        goto out;

    } else if (ret != 0) {
        error_report("Huh, first cluster in empty image is already in use?");
        abort();
    }
    /* Create a full header (including things like feature table) */
    ret = qcow2_update_header(blk_bs(blk));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not update qcow2 header");
        goto out;
    }

    /* Okay, now that we have a valid image, let's give it the right size */
    ret = blk_truncate(blk, total_size);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not resize image");
        goto out;
    }

    /* Want a backing file? There you go. */
    if (backing_file) {
        ret = bdrv_change_backing_file(blk_bs(blk), backing_file, backing_format);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not assign backing file '%s' "
                             "with format '%s'", backing_file, backing_format);
            goto out;
        }
    }

    /* And if we're supposed to preallocate metadata, do that now */
    if (prealloc != PREALLOC_MODE_OFF) {
        BDRVQcow2State *s = blk_bs(blk)->opaque;
        qemu_co_mutex_lock(&s->lock);
        ret = preallocate(blk_bs(blk));
        qemu_co_mutex_unlock(&s->lock);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not preallocate metadata");
            goto out;
        }
    }

    blk_unref(blk);
    blk = NULL;

    /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning */
    options = qdict_new();
    qdict_put(options, "driver", qstring_from_str("qcow2"));
    blk = blk_new_open(filename, NULL, options,
                       BDRV_O_RDWR | BDRV_O_NO_BACKING, &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    ret = 0;
out:
    if (blk) {
        blk_unref(blk);
    }
    return ret;
}
static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp)
{
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    char *buf = NULL;
    uint64_t size = 0;
    int flags = 0;
    size_t cluster_size = DEFAULT_CLUSTER_SIZE;
    PreallocMode prealloc;
    int version = 3;
    uint64_t refcount_bits = 16;
    int refcount_order;
    Error *local_err = NULL;
    int ret;

    /* Read out options */
    size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                    BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) {
        flags |= BLOCK_FLAG_ENCRYPT;
    }
    cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
                                         DEFAULT_CLUSTER_SIZE);
    buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    prealloc = qapi_enum_parse(PreallocMode_lookup, buf,
                               PREALLOC_MODE__MAX, PREALLOC_MODE_OFF,
                               &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto finish;
    }
    g_free(buf);

    buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL);
    if (!buf) {
        /* keep the default */
    } else if (!strcmp(buf, "0.10")) {
        version = 2;
    } else if (!strcmp(buf, "1.1")) {
        version = 3;
    } else {
        error_setg(errp, "Invalid compatibility level: '%s'", buf);
        ret = -EINVAL;
        goto finish;
    }

    if (qemu_opt_get_bool_del(opts, BLOCK_OPT_LAZY_REFCOUNTS, false)) {
        flags |= BLOCK_FLAG_LAZY_REFCOUNTS;
    }

    if (backing_file && prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Backing file and preallocation cannot be used at "
                   "the same time");
        ret = -EINVAL;
        goto finish;
    }

    if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) {
        error_setg(errp, "Lazy refcounts only supported with compatibility "
                   "level 1.1 and above (use compat=1.1 or greater)");
        ret = -EINVAL;
        goto finish;
    }

    refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS,
                                            refcount_bits);
    if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
        error_setg(errp, "Refcount width must be a power of two and may not "
                   "exceed 64 bits");
        ret = -EINVAL;
        goto finish;
    }

    if (version < 3 && refcount_bits != 16) {
        error_setg(errp, "Different refcount widths than 16 bits require "
                   "compatibility level 1.1 or above (use compat=1.1 or "
                   "greater)");
        ret = -EINVAL;
        goto finish;
    }

    refcount_order = ctz32(refcount_bits);

    ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags,
                        cluster_size, prealloc, opts, version, refcount_order,
                        &local_err);
    error_propagate(errp, local_err);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    g_free(buf);
    return ret;
}
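
/*
 * Example invocation of this create path (the -o options correspond to the
 * qcow2_create_opts list further down; figures are only illustrative):
 *
 *   qemu-img create -f qcow2 \
 *       -o compat=1.1,cluster_size=65536,lazy_refcounts=on,refcount_bits=16 \
 *       disk.qcow2 10G
 */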
static bool is_zero_sectors(BlockDriverState *bs, int64_t start,
                            uint32_t count)
{
    int nr;
    BlockDriverState *file;
    int64_t res;

    res = bdrv_get_block_status_above(bs, NULL, start, count,
                                      &nr, &file);
    return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == count;
}
static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    int ret;
    BDRVQcow2State *s = bs->opaque;

    uint32_t head = offset % s->cluster_size;
    uint32_t tail = (offset + count) % s->cluster_size;

    trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, count);

    if (head || tail) {
        int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS;
        uint64_t off;
        unsigned int nr;

        assert(head + count <= s->cluster_size);

        /* check whether remainder of cluster already reads as zero */
        if (!(is_zero_sectors(bs, cl_start,
                              DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) &&
              is_zero_sectors(bs, (offset + count) >> BDRV_SECTOR_BITS,
                              DIV_ROUND_UP(-tail & (s->cluster_size - 1),
                                           BDRV_SECTOR_SIZE)))) {
            return -ENOTSUP;
        }

        qemu_co_mutex_lock(&s->lock);
        /* We can have new write after previous check */
        offset = cl_start << BDRV_SECTOR_BITS;
        count = s->cluster_size;
        nr = s->cluster_size;
        ret = qcow2_get_cluster_offset(bs, offset, &nr, &off);
        if (ret != QCOW2_CLUSTER_UNALLOCATED && ret != QCOW2_CLUSTER_ZERO) {
            qemu_co_mutex_unlock(&s->lock);
            return -ENOTSUP;
        }
    } else {
        qemu_co_mutex_lock(&s->lock);
    }

    trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, count);

    /* Whatever is left can use real zero clusters */
    ret = qcow2_zero_clusters(bs, offset, count >> BDRV_SECTOR_BITS, flags);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}
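
/*
 * Example of the head/tail handling above (illustrative numbers): with
 * 64 KiB clusters, a request with offset = 4096 and count = 512 gives
 * head = 4096 and tail = 4608, so the request is widened to the whole
 * cluster [0, 65536) -- but only if the rest of that cluster already reads
 * as zeroes and the cluster is unallocated or already a zero cluster.
 */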
static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
                                          int64_t offset, int count)
{
    int ret;
    BDRVQcow2State *s = bs->opaque;

    if (!QEMU_IS_ALIGNED(offset | count, s->cluster_size)) {
        assert(count < s->cluster_size);
        /* Ignore partial clusters, except for the special case of the
         * complete partial cluster at the end of an unaligned file */
        if (!QEMU_IS_ALIGNED(offset, s->cluster_size) ||
            offset + count != bs->total_sectors * BDRV_SECTOR_SIZE) {
            return -ENOTSUP;
        }
    }

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_discard_clusters(bs, offset, count >> BDRV_SECTOR_BITS,
                                 QCOW2_DISCARD_REQUEST, false);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
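
/*
 * Note on the alignment check above (illustrative numbers): with 64 KiB
 * clusters only requests whose offset and count are both multiples of
 * 65536 are passed on to qcow2_discard_clusters(); the sole exception is
 * the complete trailing partial cluster of an image whose size is not
 * cluster aligned.
 */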
static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t new_l1_size;
    int ret;

    if (offset & 511) {
        error_report("The new size must be a multiple of 512");
        return -EINVAL;
    }

    /* cannot proceed if image has snapshots */
    if (s->nb_snapshots) {
        error_report("Can't resize an image which has snapshots");
        return -ENOTSUP;
    }

    /* shrinking is currently not supported */
    if (offset < bs->total_sectors * 512) {
        error_report("qcow2 doesn't support shrinking images yet");
        return -ENOTSUP;
    }

    new_l1_size = size_to_l1(s, offset);
    ret = qcow2_grow_l1_table(bs, new_l1_size, true);
    if (ret < 0) {
        return ret;
    }

    /* write updated header.size */
    offset = cpu_to_be64(offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
                           &offset, sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l1_vm_state_index = new_l1_size;
    return 0;
}
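
/*
 * Sizing example for the grow path above (illustrative): with 64 KiB
 * clusters an L2 table holds 8192 entries, so each L1 entry covers
 * 8192 * 64 KiB = 512 MiB of guest space.  Growing an image to 100 GiB
 * therefore needs an L1 table of at least 200 entries.
 */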
/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static coroutine_fn int
qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                            uint64_t bytes, QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    QEMUIOVector hd_qiov;
    struct iovec iov;
    z_stream strm;
    int ret, out_len;
    uint8_t *buf, *out_buf;
    uint64_t cluster_offset;

    if (bytes == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(bs->file->bs);
        return bdrv_truncate(bs->file->bs, cluster_offset);
    }

    buf = qemu_blockalign(bs, s->cluster_size);
    if (bytes != s->cluster_size) {
        if (bytes > s->cluster_size ||
            offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS)
        {
            qemu_vfree(buf);
            return -EINVAL;
        }
        /* Zero-pad last write if image size is not cluster aligned */
        memset(buf + bytes, 0, s->cluster_size - bytes);
    }
    qemu_iovec_to_buf(qiov, 0, buf, bytes);

    out_buf = g_malloc(s->cluster_size);

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        ret = -EINVAL;
        goto fail;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        deflateEnd(&strm);
        ret = -EINVAL;
        goto fail;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        goto success;
    }

    qemu_co_mutex_lock(&s->lock);
    cluster_offset =
        qcow2_alloc_compressed_cluster_offset(bs, offset, out_len);
    if (!cluster_offset) {
        qemu_co_mutex_unlock(&s->lock);
        ret = -EIO;
        goto fail;
    }
    cluster_offset &= s->cluster_offset_mask;

    ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        goto fail;
    }

    iov = (struct iovec) {
        .iov_base   = out_buf,
        .iov_len    = out_len,
    };
    qemu_iovec_init_external(&hd_qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
    ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0);
    if (ret < 0) {
        goto fail;
    }
success:
    ret = 0;
fail:
    qemu_vfree(buf);
    g_free(out_buf);
    return ret;
}
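
/*
 * The deflate stream above is set up for raw output (no zlib header) at
 * high compression.  If the compressed cluster does not come out strictly
 * smaller than cluster_size -- e.g. for already-compressed guest data --
 * the code falls back to a regular uncompressed cluster write via
 * qcow2_co_pwritev().
 */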
static int make_completely_empty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, l1_clusters;
    int64_t offset;
    uint64_t *new_reftable = NULL;
    uint64_t rt_entry, l1_size2;
    struct {
        uint64_t l1_offset;
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED l1_ofs_rt_ofs_cls;

    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    ret = qcow2_cache_empty(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* Refcounts will be broken utterly */
    ret = qcow2_mark_dirty(bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);

    l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
    l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t);

    /* After this call, neither the in-memory nor the on-disk refcount
     * information accurately describe the actual references */

    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
                             l1_clusters * s->cluster_size, 0);
    if (ret < 0) {
        goto fail_broken_refcounts;
    }
    memset(s->l1_table, 0, l1_size2);

    BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE);

    /* Overwrite enough clusters at the beginning of the sectors to place
     * the refcount table, a refcount block and the L1 table in; this may
     * overwrite parts of the existing refcount and L1 table, which is not
     * an issue because the dirty flag is set, complete data loss is in fact
     * desired and partial data loss is consequently fine as well */
    ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
                             (2 + l1_clusters) * s->cluster_size, 0);
    /* This call (even if it failed overall) may have overwritten on-disk
     * refcount structures; in that case, the in-memory refcount information
     * will probably differ from the on-disk information which makes the BDS
     * unusable */
    if (ret < 0) {
        goto fail_broken_refcounts;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE);

    /* "Create" an empty reftable (one cluster) directly after the image
     * header and an empty L1 table three clusters after the image header;
     * the cluster between those two will be used as the first refblock */
    l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
    l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
    l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
                           &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls));
    if (ret < 0) {
        goto fail_broken_refcounts;
    }

    s->l1_table_offset = 3 * s->cluster_size;

    new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t));
    if (!new_reftable) {
        ret = -ENOMEM;
        goto fail_broken_refcounts;
    }

    s->refcount_table_offset = s->cluster_size;
    s->refcount_table_size = s->cluster_size / sizeof(uint64_t);

    g_free(s->refcount_table);
    s->refcount_table = new_reftable;
    new_reftable = NULL;

    /* Now the in-memory refcount information again corresponds to the on-disk
     * information (reftable is empty and no refblocks (the refblock cache is
     * empty)); however, this means some clusters (e.g. the image header) are
     * referenced, but not refcounted, but the normal qcow2 code assumes that
     * the in-memory information is always correct */

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Enter the first refblock into the reftable */
    rt_entry = cpu_to_be64(2 * s->cluster_size);
    ret = bdrv_pwrite_sync(bs->file, s->cluster_size,
                           &rt_entry, sizeof(rt_entry));
    if (ret < 0) {
        goto fail_broken_refcounts;
    }
    s->refcount_table[0] = 2 * s->cluster_size;

    s->free_cluster_index = 0;
    assert(3 + l1_clusters <= s->refcount_block_size);
    offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2);
    if (offset < 0) {
        ret = offset;
        goto fail_broken_refcounts;
    } else if (offset > 0) {
        error_report("First cluster in emptied image is in use");
        abort();
    }

    /* Now finally the in-memory information corresponds to the on-disk
     * structures and is correct */
    ret = qcow2_mark_clean(bs);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_truncate(bs->file->bs, (3 + l1_clusters) * s->cluster_size);
    if (ret < 0) {
        goto fail;
    }

    return 0;

fail_broken_refcounts:
    /* The BDS is unusable at this point. If we wanted to make it usable, we
     * would have to call qcow2_refcount_close(), qcow2_refcount_init(),
     * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init()
     * again. However, because the functions which could have caused this error
     * path to be taken are used by those functions as well, it's very likely
     * that that sequence will fail as well. Therefore, just eject the BDS. */
    bs->drv = NULL;

fail:
    g_free(new_reftable);
    return ret;
}
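
/*
 * Resulting layout after make_completely_empty() succeeds (offsets in units
 * of cluster_size):
 *   cluster 0:        image header
 *   cluster 1:        refcount table (one entry, pointing to cluster 2)
 *   cluster 2:        first refcount block
 *   cluster 3 and up: zeroed L1 table (l1_clusters clusters)
 * The file is then truncated to (3 + l1_clusters) clusters.
 */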
static int qcow2_make_empty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start_sector;
    int sector_step = (QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size) /
                       BDRV_SECTOR_SIZE);
    int l1_clusters, ret = 0;

    l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));

    if (s->qcow_version >= 3 && !s->snapshots &&
        3 + l1_clusters <= s->refcount_block_size) {
        /* The following function only works for qcow2 v3 images (it requires
         * the dirty flag) and only as long as there are no snapshots (because
         * it completely empties the image). Furthermore, the L1 table and three
         * additional clusters (image header, refcount table, one refcount
         * block) have to fit inside one refcount block. */
        return make_completely_empty(bs);
    }

    /* This fallback code simply discards every active cluster; this is slow,
     * but works in all cases */
    for (start_sector = 0; start_sector < bs->total_sectors;
         start_sector += sector_step)
    {
        /* As this function is generally used after committing an external
         * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the
         * default action for this kind of discard is to pass the discard,
         * which will ideally result in an actually smaller image file, as
         * is probably desired. */
        ret = qcow2_discard_clusters(bs, start_sector * BDRV_SECTOR_SIZE,
                                     MIN(sector_step,
                                         bs->total_sectors - start_sector),
                                     QCOW2_DISCARD_SNAPSHOT, true);
        if (ret < 0) {
            break;
        }
    }

    return ret;
}
static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_cache_write(bs, s->l2_table_cache);
    if (ret < 0) {
        qemu_co_mutex_unlock(&s->lock);
        return ret;
    }

    if (qcow2_need_accurate_refcounts(s)) {
        ret = qcow2_cache_write(bs, s->refcount_block_cache);
        if (ret < 0) {
            qemu_co_mutex_unlock(&s->lock);
            return ret;
        }
    }

    qemu_co_mutex_unlock(&s->lock);

    return 0;
}
static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcow2State *s = bs->opaque;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = (s->qcow_version >= 3);
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = qcow2_vm_state_offset(s);
    return 0;
}
static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    ImageInfoSpecific *spec_info = g_new(ImageInfoSpecific, 1);

    *spec_info = (ImageInfoSpecific){
        .type  = IMAGE_INFO_SPECIFIC_KIND_QCOW2,
        .u.qcow2.data = g_new(ImageInfoSpecificQCow2, 1),
    };
    if (s->qcow_version == 2) {
        *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
            .compat             = g_strdup("0.10"),
            .refcount_bits      = s->refcount_bits,
        };
    } else if (s->qcow_version == 3) {
        *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
            .compat             = g_strdup("1.1"),
            .lazy_refcounts     = s->compatible_features &
                                  QCOW2_COMPAT_LAZY_REFCOUNTS,
            .has_lazy_refcounts = true,
            .corrupt            = s->incompatible_features &
                                  QCOW2_INCOMPAT_CORRUPT,
            .has_corrupt        = true,
            .refcount_bits      = s->refcount_bits,
        };
    } else {
        /* if this assertion fails, this probably means a new version was
         * added without having it covered here */
        assert(false);
    }

    return spec_info;
}
#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(bs->file->bs);
    nb_clusters = size_to_clusters(s, size);
    for(k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount,
               k - k1);
    }
}
#endif
static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
                              int64_t pos)
{
    BDRVQcow2State *s = bs->opaque;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
    return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos,
                                    qiov->size, qiov, 0);
}

static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
                              int64_t pos)
{
    BDRVQcow2State *s = bs->opaque;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
    return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos,
                                   qiov->size, qiov, 0);
}
/*
 * Downgrades an image's version. To achieve this, any incompatible features
 * have to be removed.
 */
static int qcow2_downgrade(BlockDriverState *bs, int target_version,
                           BlockDriverAmendStatusCB *status_cb, void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    int current_version = s->qcow_version;
    int ret;

    if (target_version == current_version) {
        return 0;
    } else if (target_version > current_version) {
        return -EINVAL;
    } else if (target_version != 2) {
        return -EINVAL;
    }

    if (s->refcount_order != 4) {
        error_report("compat=0.10 requires refcount_bits=16");
        return -ENOTSUP;
    }

    /* clear incompatible features */
    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in
     * the first place; if that happens nonetheless, returning -ENOTSUP is the
     * best thing to do anyway */

    if (s->incompatible_features) {
        return -ENOTSUP;
    }

    /* since we can ignore compatible features, we can set them to 0 as well */
    s->compatible_features = 0;
    /* if lazy refcounts have been used, they have already been fixed through
     * clearing the dirty flag */

    /* clearing autoclear features is trivial */
    s->autoclear_features = 0;

    ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque);
    if (ret < 0) {
        return ret;
    }

    s->qcow_version = target_version;
    ret = qcow2_update_header(bs);
    if (ret < 0) {
        s->qcow_version = current_version;
        return ret;
    }
    return 0;
}
typedef enum Qcow2AmendOperation {
    /* This is the value Qcow2AmendHelperCBInfo::last_operation will be
     * statically initialized to so that the helper CB can discern the first
     * invocation from an operation change */
    QCOW2_NO_OPERATION = 0,

    QCOW2_CHANGING_REFCOUNT_ORDER,
    QCOW2_DOWNGRADING,
} Qcow2AmendOperation;

typedef struct Qcow2AmendHelperCBInfo {
    /* The code coordinating the amend operations should only modify
     * these four fields; the rest will be managed by the CB */
    BlockDriverAmendStatusCB *original_status_cb;
    void *original_cb_opaque;

    Qcow2AmendOperation current_operation;

    /* Total number of operations to perform (only set once) */
    int total_operations;

    /* The following fields are managed by the CB */

    /* Number of operations completed */
    int operations_completed;

    /* Cumulative offset of all completed operations */
    int64_t offset_completed;

    Qcow2AmendOperation last_operation;
    int64_t last_work_size;
} Qcow2AmendHelperCBInfo;
static void qcow2_amend_helper_cb(BlockDriverState *bs,
                                  int64_t operation_offset,
                                  int64_t operation_work_size, void *opaque)
{
    Qcow2AmendHelperCBInfo *info = opaque;
    int64_t current_work_size;
    int64_t projected_work_size;

    if (info->current_operation != info->last_operation) {
        if (info->last_operation != QCOW2_NO_OPERATION) {
            info->offset_completed += info->last_work_size;
            info->operations_completed++;
        }

        info->last_operation = info->current_operation;
    }

    assert(info->total_operations > 0);
    assert(info->operations_completed < info->total_operations);

    info->last_work_size = operation_work_size;

    current_work_size = info->offset_completed + operation_work_size;

    /* current_work_size is the total work size for (operations_completed + 1)
     * operations (which includes this one), so multiply it by the number of
     * operations not covered and divide it by the number of operations
     * covered to get a projection for the operations not covered */
    projected_work_size = current_work_size * (info->total_operations -
                                               info->operations_completed - 1)
                                            / (info->operations_completed + 1);

    info->original_status_cb(bs, info->offset_completed + operation_offset,
                             current_work_size + projected_work_size,
                             info->original_cb_opaque);
}
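
/*
 * Projection example (illustrative): amending both the refcount order and
 * the version gives total_operations == 2.  While the first operation is
 * running with operation_work_size == 1000 and nothing completed yet,
 * current_work_size == 1000 and projected_work_size ==
 * 1000 * (2 - 0 - 1) / (0 + 1) == 1000, so the caller sees a total work
 * estimate of 2000.
 */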
static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    int old_version = s->qcow_version, new_version = old_version;
    uint64_t new_size = 0;
    const char *backing_file = NULL, *backing_format = NULL;
    bool lazy_refcounts = s->use_lazy_refcounts;
    const char *compat = NULL;
    uint64_t cluster_size = s->cluster_size;
    bool encrypt;
    int refcount_bits = s->refcount_bits;
    int ret;
    QemuOptDesc *desc = opts->list->desc;
    Qcow2AmendHelperCBInfo helper_cb_info;

    while (desc && desc->name) {
        if (!qemu_opt_find(opts, desc->name)) {
            /* only change explicitly defined options */
            desc++;
            continue;
        }

        if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
            compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
            if (!compat) {
                /* preserve default */
            } else if (!strcmp(compat, "0.10")) {
                new_version = 2;
            } else if (!strcmp(compat, "1.1")) {
                new_version = 3;
            } else {
                error_report("Unknown compatibility level %s", compat);
                return -EINVAL;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) {
            error_report("Cannot change preallocation mode");
            return -ENOTSUP;
        } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
            new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
        } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
            backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
        } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) {
            encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT,
                                        !!s->cipher);

            if (encrypt != !!s->cipher) {
                error_report("Changing the encryption flag is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE,
                                             cluster_size);
            if (cluster_size != s->cluster_size) {
                error_report("Changing the cluster size is not supported");
                return -ENOTSUP;
            }
        } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
            lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
                                               lazy_refcounts);
        } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
            refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
                                                refcount_bits);

            if (refcount_bits <= 0 || refcount_bits > 64 ||
                !is_power_of_2(refcount_bits))
            {
                error_report("Refcount width must be a power of two and may "
                             "not exceed 64 bits");
                return -EINVAL;
            }
        } else {
            /* if this point is reached, this probably means a new option was
             * added without having it covered here */
            abort();
        }

        desc++;
    }

    helper_cb_info = (Qcow2AmendHelperCBInfo){
        .original_status_cb = status_cb,
        .original_cb_opaque = cb_opaque,
        .total_operations = (new_version < old_version)
                          + (s->refcount_bits != refcount_bits)
    };

    /* Upgrade first (some features may require compat=1.1) */
    if (new_version > old_version) {
        s->qcow_version = new_version;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            s->qcow_version = old_version;
            return ret;
        }
    }

    if (s->refcount_bits != refcount_bits) {
        int refcount_order = ctz32(refcount_bits);
        Error *local_error = NULL;

        if (new_version < 3 && refcount_bits != 16) {
            error_report("Different refcount widths than 16 bits require "
                         "compatibility level 1.1 or above (use compat=1.1 or "
                         "greater)");
            return -EINVAL;
        }

        helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
        ret = qcow2_change_refcount_order(bs, refcount_order,
                                          &qcow2_amend_helper_cb,
                                          &helper_cb_info, &local_error);
        if (ret < 0) {
            error_report_err(local_error);
            return ret;
        }
    }

    if (backing_file || backing_format) {
        ret = qcow2_change_backing_file(bs,
                    backing_file ?: s->image_backing_file,
                    backing_format ?: s->image_backing_format);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->use_lazy_refcounts != lazy_refcounts) {
        if (lazy_refcounts) {
            if (new_version < 3) {
                error_report("Lazy refcounts only supported with compatibility "
                             "level 1.1 and above (use compat=1.1 or greater)");
                return -EINVAL;
            }
            s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
                return ret;
            }
            s->use_lazy_refcounts = true;
        } else {
            /* make image clean first */
            ret = qcow2_mark_clean(bs);
            if (ret < 0) {
                return ret;
            }
            /* now disallow lazy refcounts */
            s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
                return ret;
            }
            s->use_lazy_refcounts = false;
        }
    }

    if (new_size) {
        ret = bdrv_truncate(bs, new_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Downgrade last (so unsupported features can be removed before) */
    if (new_version < old_version) {
        helper_cb_info.current_operation = QCOW2_DOWNGRADING;
        ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
                              &helper_cb_info);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
/*
 * If offset or size are negative, respectively, they will not be included in
 * the BLOCK_IMAGE_CORRUPTED event emitted.
 * fatal will be ignored for read-only BDS; corruptions found there will always
 * be considered non-fatal.
 */
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
                             int64_t size, const char *message_format, ...)
{
    BDRVQcow2State *s = bs->opaque;
    const char *node_name;
    char *message;
    va_list ap;

    fatal = fatal && !bs->read_only;

    if (s->signaled_corruption &&
        (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
    {
        return;
    }

    va_start(ap, message_format);
    message = g_strdup_vprintf(message_format, ap);
    va_end(ap);

    if (fatal) {
        fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
                "corruption events will be suppressed\n", message);
    } else {
        fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
                "corruption events will be suppressed\n", message);
    }

    node_name = bdrv_get_node_name(bs);
    qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
                                          *node_name != '\0', node_name,
                                          message, offset >= 0, offset,
                                          size >= 0, size,
                                          fatal, &error_abort);
    g_free(message);

    if (fatal) {
        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
    }

    s->signaled_corruption = true;
}
= {
3322 .name
= "qcow2-create-opts",
3323 .head
= QTAILQ_HEAD_INITIALIZER(qcow2_create_opts
.head
),
3326 .name
= BLOCK_OPT_SIZE
,
3327 .type
= QEMU_OPT_SIZE
,
3328 .help
= "Virtual disk size"
3331 .name
= BLOCK_OPT_COMPAT_LEVEL
,
3332 .type
= QEMU_OPT_STRING
,
3333 .help
= "Compatibility level (0.10 or 1.1)"
3336 .name
= BLOCK_OPT_BACKING_FILE
,
3337 .type
= QEMU_OPT_STRING
,
3338 .help
= "File name of a base image"
3341 .name
= BLOCK_OPT_BACKING_FMT
,
3342 .type
= QEMU_OPT_STRING
,
3343 .help
= "Image format of the base image"
3346 .name
= BLOCK_OPT_ENCRYPT
,
3347 .type
= QEMU_OPT_BOOL
,
3348 .help
= "Encrypt the image",
3349 .def_value_str
= "off"
3352 .name
= BLOCK_OPT_CLUSTER_SIZE
,
3353 .type
= QEMU_OPT_SIZE
,
3354 .help
= "qcow2 cluster size",
3355 .def_value_str
= stringify(DEFAULT_CLUSTER_SIZE
)
3358 .name
= BLOCK_OPT_PREALLOC
,
3359 .type
= QEMU_OPT_STRING
,
3360 .help
= "Preallocation mode (allowed values: off, metadata, "
3364 .name
= BLOCK_OPT_LAZY_REFCOUNTS
,
3365 .type
= QEMU_OPT_BOOL
,
3366 .help
= "Postpone refcount updates",
3367 .def_value_str
= "off"
3370 .name
= BLOCK_OPT_REFCOUNT_BITS
,
3371 .type
= QEMU_OPT_NUMBER
,
3372 .help
= "Width of a reference count entry in bits",
3373 .def_value_str
= "16"
3375 { /* end of list */ }
BlockDriver bdrv_qcow2 = {
    .format_name        = "qcow2",
    .instance_size      = sizeof(BDRVQcow2State),
    .bdrv_probe         = qcow2_probe,
    .bdrv_open          = qcow2_open,
    .bdrv_close         = qcow2_close,
    .bdrv_reopen_prepare  = qcow2_reopen_prepare,
    .bdrv_reopen_commit   = qcow2_reopen_commit,
    .bdrv_reopen_abort    = qcow2_reopen_abort,
    .bdrv_join_options    = qcow2_join_options,
    .bdrv_create        = qcow2_create,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = qcow2_co_get_block_status,
    .bdrv_set_key       = qcow2_set_key,

    .bdrv_co_preadv         = qcow2_co_preadv,
    .bdrv_co_pwritev        = qcow2_co_pwritev,
    .bdrv_co_flush_to_os    = qcow2_co_flush_to_os,

    .bdrv_co_pwrite_zeroes  = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard       = qcow2_co_pdiscard,
    .bdrv_truncate          = qcow2_truncate,
    .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed,
    .bdrv_make_empty        = qcow2_make_empty,

    .bdrv_snapshot_create   = qcow2_snapshot_create,
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
    .bdrv_snapshot_list     = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_get_info          = qcow2_get_info,
    .bdrv_get_specific_info = qcow2_get_specific_info,

    .bdrv_save_vmstate    = qcow2_save_vmstate,
    .bdrv_load_vmstate    = qcow2_load_vmstate,

    .supports_backing           = true,
    .bdrv_change_backing_file   = qcow2_change_backing_file,

    .bdrv_refresh_limits        = qcow2_refresh_limits,
    .bdrv_invalidate_cache      = qcow2_invalidate_cache,
    .bdrv_inactivate            = qcow2_inactivate,

    .create_opts         = &qcow2_create_opts,
    .bdrv_check          = qcow2_check,
    .bdrv_amend_options  = qcow2_amend_options,

    .bdrv_detach_aio_context  = qcow2_detach_aio_context,
    .bdrv_attach_aio_context  = qcow2_attach_aio_context,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);