/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend, enum qcow2_discard_type type);
/*********************************************************/
/* refcount handling */
int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->refcount_table_size; i++) {
            be64_to_cpus(&s->refcount_table[i]);
        }
    }
    return 0;
 fail:
    return ret;
}
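
/* Frees the in-memory refcount table that qcow2_refcount_init() set up. */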
void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->refcount_table);
}
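
/*
 * Loads an existing refcount block from the given offset through the refcount
 * block cache and returns a pointer to it in *refcount_block.
 */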
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          refcount_block);

    return ret;
}
/*
 * Returns the refcount of the cluster given by its index. Any non-negative
 * return value is the refcount of the cluster, negative values are -errno
 * and indicate an error.
 */
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    uint16_t *refcount_block;
    uint16_t refcount;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    refcount = be16_to_cpu(refcount_block[block_index]);

    ret = qcow2_cache_put(bs, s->refcount_block_cache,
                          (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    return refcount;
}
/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
static unsigned int next_refcount_table_size(BDRVQcowState *s,
    unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}
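
/*
 * Example: with 64 kB clusters one reftable cluster holds 8192 entries, so a
 * min_size of 20000 entries gives min_clusters = 3; starting from a single
 * reftable cluster the loop above grows 1 -> 2 -> 3 clusters and the function
 * returns 3 * 8192 = 24576 entries.
 */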
/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
    uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}
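
/*
 * With 64 kB clusters a refcount block holds 32768 16-bit entries, so two
 * offsets fall into the same refcount block iff they lie within the same
 * 2 GiB-aligned region of the image file.
 */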
/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
    int64_t cluster_index, uint16_t **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       (void**) refcount_block);
        }
    }

    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcounts
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
            " at %" PRIx64 "\n",
            refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            (s->refcount_block_size - 1);
        (*refcount_block)[block_index] = cpu_to_be16(1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }
    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    if (ret < 0) {
        goto fail_block;
    }
    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far */
    uint64_t blocks_used = DIV_ROUND_UP(cluster_index, s->refcount_block_size);

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            ((table_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            ((meta_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size));

    } while (last_table_size != table_size);

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
            s->refcount_table_size, table_size);
#endif
    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    uint16_t *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {
        ret = -ENOMEM;
        goto fail_table;
    }

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
        s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        new_blocks[block++] = cpu_to_be16(1);
    }

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
        blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    new_blocks = NULL;
    if (ret < 0) {
        goto fail_table;
    }
    /* Write refcount table to disk */
    for (i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
        table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for (i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    uint8_t data[12];
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),
        data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, (void**) refcount_block);
    if (ret < 0) {
        return ret;
    }
    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;

fail_table:
    g_free(new_blocks);
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    }
    return ret;
}
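
/*
 * Flushes the queue of pending discards: every queued region is removed from
 * the list and, if the preceding metadata update succeeded, passed down to
 * bdrv_discard().
 */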
void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_discard(bs->file,
                         d->offset >> BDRV_SECTOR_BITS,
                         d->bytes >> BDRV_SECTOR_BITS);
        }

        g_free(d);
    }
}
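
/*
 * Queues a freed range for later discarding, merging it with an adjacent
 * pending region where possible; the queue is drained by
 * qcow2_process_discards().
 */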
static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs     = bs,
        .offset = offset,
        .bytes  = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
            || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
/* XXX: cache several refcount block clusters ? */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    uint16_t *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
            offset, length, addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (addend < 0) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
            s->l2_table_cache);
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for (cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                ret = qcow2_cache_put(bs, s->refcount_block_cache,
                    (void**) &refcount_block);
                if (ret < 0) {
                    goto fail;
                }
            }

            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);

        refcount = be16_to_cpu(refcount_block[block_index]);
        refcount += addend;
        if (refcount < 0 || refcount > 0xffff) {
            ret = -EINVAL;
            goto fail;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        refcount_block[block_index] = cpu_to_be16(refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);
        }
    }

    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        int wret;
        wret = qcow2_cache_put(bs, s->refcount_block_cache,
            (void**) &refcount_block);
        if (wret < 0) {
            return ret < 0 ? ret : wret;
        }
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend,
                                QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}
/*
 * Increases or decreases the refcount of a given cluster.
 *
 * If the return value is non-negative, it is the new refcount of the cluster.
 * If it is negative, it is -errno and indicates an error.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  int addend,
                                  enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          type);
    if (ret < 0) {
        return ret;
    }

    return qcow2_get_refcount(bs, cluster_index);
}
/*********************************************************/
/* cluster allocation functions */



/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t i, nb_clusters;
    int refcount;

    nb_clusters = size_to_clusters(s, size);
retry:
    for (i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        refcount = qcow2_get_refcount(bs, next_cluster_index);

        if (refcount < 0) {
            return refcount;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in an int64_t */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
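
/*
 * Allocates a contiguous range of clusters and raises their refcount to 1.
 * Returns the offset of the first cluster or -errno on failure.
 */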
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}
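
/*
 * Tries to extend an existing allocation by allocating up to nb_clusters
 * contiguous clusters at the given offset. Returns the number of clusters
 * that could actually be allocated or -errno on failure.
 */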
int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
    int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_index;
    uint64_t i;
    int refcount, ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for (i = 0; i < nb_clusters; i++) {
            refcount = qcow2_get_refcount(bs, cluster_index++);

            if (refcount < 0) {
                return refcount;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset;
    size_t free_in_cluster;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));

    offset = s->free_byte_offset;

    if (offset) {
        int refcount = qcow2_get_refcount(bs, offset >> s->cluster_bits);
        if (refcount < 0) {
            return refcount;
        }

        if (refcount == 0xffff) {
            offset = 0;
        }
    }

    free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
    if (!offset || free_in_cluster < size) {
        int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
        if (new_cluster < 0) {
            return new_cluster;
        }

        if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
            offset = new_cluster;
        }
    }

    ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
    if (ret < 0) {
        return ret;
    }

    /* The cluster refcount was incremented; refcount blocks must be flushed
     * before the caller's L2 table updates. */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);

    s->free_byte_offset = offset + size;
    if (!offset_into_cluster(s, s->free_byte_offset)) {
        s->free_byte_offset = 0;
    }

    return offset;
}
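
/*
 * Decrements the refcount of every cluster in the given range; clusters whose
 * refcount drops to zero may be passed down as discard requests, depending on
 * the discard type.
 */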
void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, -1, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}
/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO:
        if (l2_entry & L2E_OFFSET_MASK) {
            if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
                qcow2_signal_corruption(bs, false, -1, -1,
                                        "Cannot free unaligned cluster %#llx",
                                        l2_entry & L2E_OFFSET_MASK);
            } else {
                qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                    nb_clusters << s->cluster_bits, type);
            }
        }
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}
/*********************************************************/
/* snapshots and image creation */



/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2;
    bool l1_allocated = false;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors, refcount;
    int ret;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                old_offset = offset;
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((offset >> s->csize_shift) &
                                   s->csize_mask) + 1;
                    if (addend != 0) {
                        ret = update_refcount(bs,
                            (offset & s->cluster_offset_mask) & ~511,
                            nb_csectors * 512, addend,
                            QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }
                    /* compressed clusters are never modified */
                    refcount = 2;
                    break;

                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO:
                    if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Data "
                                                "cluster offset %#llx "
                                                "unaligned (L2 offset: %#"
                                                PRIx64 ", L2 index: %#x)",
                                                offset & L2E_OFFSET_MASK,
                                                l2_offset, j);
                        ret = -EIO;
                        goto fail;
                    }

                    cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                    if (!cluster_index) {
                        /* unallocated */
                        refcount = 0;
                        break;
                    }
                    if (addend != 0) {
                        refcount = qcow2_update_cluster_refcount(bs,
                                cluster_index, addend,
                                QCOW2_DISCARD_SNAPSHOT);
                    } else {
                        refcount = qcow2_get_refcount(bs, cluster_index);
                    }

                    if (refcount < 0) {
                        ret = refcount;
                        goto fail;
                    }
                    break;

                case QCOW2_CLUSTER_UNALLOCATED:
                    refcount = 0;
                    break;

                default:
                    abort();
                }

                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                }
                if (offset != old_offset) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                            s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                }
            }
= qcow2_cache_put(bs
, s
->l2_table_cache
, (void**) &l2_table
);
1020 refcount
= qcow2_update_cluster_refcount(bs
, l2_offset
>>
1021 s
->cluster_bits
, addend
, QCOW2_DISCARD_SNAPSHOT
);
1023 refcount
= qcow2_get_refcount(bs
, l2_offset
>> s
->cluster_bits
);
1028 } else if (refcount
== 1) {
1029 l2_offset
|= QCOW_OFLAG_COPIED
;
1031 if (l2_offset
!= old_l2_offset
) {
1032 l1_table
[i
] = l2_offset
;
1038 ret
= bdrv_flush(bs
);
1041 qcow2_cache_put(bs
, s
->l2_table_cache
, (void**) &l2_table
);
1044 s
->cache_discards
= false;
1045 qcow2_process_discards(bs
, ret
);
1047 /* Update L1 only if it isn't deleted anyway (addend = -1) */
1048 if (ret
== 0 && addend
>= 0 && l1_modified
) {
1049 for (i
= 0; i
< l1_size
; i
++) {
1050 cpu_to_be64s(&l1_table
[i
]);
1053 ret
= bdrv_pwrite_sync(bs
->file
, l1_table_offset
, l1_table
, l1_size2
);
1055 for (i
= 0; i
< l1_size
; i
++) {
1056 be64_to_cpus(&l1_table
[i
]);
/*********************************************************/
/* refcount checking functions */



/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         uint16_t **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, last, cluster_offset, k;

    if (size <= 0) {
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for (cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            int64_t old_refcount_table_size = *refcount_table_size;
            uint16_t *new_refcount_table;

            *refcount_table_size = k + 1;
            new_refcount_table = g_try_realloc(*refcount_table,
                                               *refcount_table_size *
                                               sizeof(**refcount_table));
            if (!new_refcount_table) {
                *refcount_table_size = old_refcount_table_size;
                res->check_errors++;
                return -ENOMEM;
            }
            *refcount_table = new_refcount_table;

            memset(*refcount_table + old_refcount_table_size, 0,
                   (*refcount_table_size - old_refcount_table_size) *
                   sizeof(**refcount_table));
        }

        if (++(*refcount_table)[k] == 0) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            res->corruptions++;
        }
    }

    return 0;
}
/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};

/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
    uint16_t **refcount_table, int64_t *refcount_table_size, int64_t l2_offset,
    int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        goto fail;
    }

    /* Do the actual checks */
    for (i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                    "copied flag must never be set for compressed "
                    "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);
            if (ret < 0) {
                goto fail;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {
                break;
            }
            /* fall through */

        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                    "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    g_free(l2_table);
    return ret;
}
/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              uint16_t **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);
    if (ret < 0) {
        goto fail;
    }

    /* Read L1 table entries from disk */
    if (l1_size2 > 0) {
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            ret = -ENOMEM;
            res->check_errors++;
            goto fail;
        }
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
            goto fail;
        }
        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }

    /* Do the actual checks */
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    g_free(l1_table);
    return ret;
}
/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * have been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 *
 * Returns 0 if no problems were found or -errno if an internal error occurred.
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    int refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        refcount = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits);
        if (refcount < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%d\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" :
                                            "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }
        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                refcount = qcow2_get_refcount(bs,
                                              data_offset >> s->cluster_bits);
                if (refcount < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%d\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" :
                                                    "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}
/*
 * Checks consistency of refblocks and accounts for each refblock in
 * *refcount_table.
 */
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix, bool *rebuild,
                           uint16_t **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i, size;
    int ret;

    for (i = 0; i < s->refcount_table_size; i++) {
        uint64_t offset, cluster;
        offset = s->refcount_table[i];
        cluster = offset >> s->cluster_bits;

        /* Refcount blocks are cluster aligned */
        if (offset_into_cluster(s, offset)) {
            fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
                "cluster aligned; refcount table entry corrupted\n", i);
            res->corruptions++;
            *rebuild = true;
            continue;
        }

        if (cluster >= *nb_clusters) {
            fprintf(stderr, "%s refcount block %" PRId64 " is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);

            if (fix & BDRV_FIX_ERRORS) {
                int64_t old_nb_clusters = *nb_clusters;
                uint16_t *new_refcount_table;

                if (offset > INT64_MAX - s->cluster_size) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                ret = bdrv_truncate(bs->file, offset + s->cluster_size);
                if (ret < 0) {
                    goto resize_fail;
                }
                size = bdrv_getlength(bs->file);
                if (size < 0) {
                    ret = size;
                    goto resize_fail;
                }

                *nb_clusters = size_to_clusters(s, size);
                assert(*nb_clusters >= old_nb_clusters);

                new_refcount_table = g_try_realloc(*refcount_table,
                                                   *nb_clusters *
                                                   sizeof(**refcount_table));
                if (!new_refcount_table) {
                    *nb_clusters = old_nb_clusters;
                    res->check_errors++;
                    return -ENOMEM;
                }
                *refcount_table = new_refcount_table;

                memset(*refcount_table + old_nb_clusters, 0,
                       (*nb_clusters - old_nb_clusters) *
                       sizeof(**refcount_table));

                if (cluster >= *nb_clusters) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                res->corruptions_fixed++;
                ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                    offset, s->cluster_size);
                if (ret < 0) {
                    return ret;
                }
                /* No need to check whether the refcount is now greater than 1:
                 * This area was just allocated and zeroed, so it can only be
                 * exactly 1 after inc_refcounts() */
                continue;

resize_fail:
                res->corruptions++;
                *rebuild = true;
                fprintf(stderr, "ERROR could not resize image: %s\n",
                        strerror(-ret));
            } else {
                res->corruptions++;
            }
            continue;
        }

        if (offset != 0) {
            ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                offset, s->cluster_size);
            if (ret < 0) {
                return ret;
            }
            if ((*refcount_table)[cluster] != 1) {
                fprintf(stderr, "ERROR refcount block %" PRId64
                        " refcount=%d\n", i, (*refcount_table)[cluster]);
                res->corruptions++;
                *rebuild = true;
            }
        }
    }

    return 0;
}
/*
 * Calculates an in-memory refcount table.
 */
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               uint16_t **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        *refcount_table = g_try_new0(uint16_t, *nb_clusters);
        if (*nb_clusters && *refcount_table == NULL) {
            res->check_errors++;
            return -ENOMEM;
        }
    }

    /* header */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            return ret;
        }
    }
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}
/*
 * Compares the actual reference count for each cluster in the image against
 * the refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              uint16_t *refcount_table, int64_t nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    int refcount1, refcount2, ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        refcount1 = qcow2_get_refcount(bs, i);
        if (refcount1 < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-refcount1));
            res->check_errors++;
            continue;
        }

        refcount2 = refcount_table[i];

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n",
                    num_fixed != NULL     ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                                            "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount2 - refcount1,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}
/*
 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
 * the on-disk refcount structures.
 *
 * On input, *first_free_cluster tells where to start looking, and need not
 * actually be a free cluster; the returned offset will not be before that
 * cluster. On output, *first_free_cluster points to the first gap found, even
 * if that gap was too small to be used as the returned offset.
 *
 * Note that *first_free_cluster is a cluster index whereas the return value is
 * an offset.
 */
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   int cluster_count,
                                   uint16_t **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * continuously free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
         cluster++)
    {
        if (!(*refcount_table)[cluster]) {
            contiguous_free_clusters++;
            if (first_gap) {
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
                first_gap = false;
            }
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;
        }
    }

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of continuously free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly to append free clusters at the end of the image */
    if (contiguous_free_clusters < cluster_count) {
        int64_t old_imrt_nb_clusters = *imrt_nb_clusters;
        uint16_t *new_refcount_table;

        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        *imrt_nb_clusters = cluster + cluster_count - contiguous_free_clusters;
        new_refcount_table = g_try_realloc(*refcount_table,
                                           *imrt_nb_clusters *
                                           sizeof(**refcount_table));
        if (!new_refcount_table) {
            *imrt_nb_clusters = old_imrt_nb_clusters;
            return -ENOMEM;
        }
        *refcount_table = new_refcount_table;

        memset(*refcount_table + old_imrt_nb_clusters, 0,
               (*imrt_nb_clusters - old_imrt_nb_clusters) *
               sizeof(**refcount_table));
    }

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        (*refcount_table)[cluster + i] = 1;
    }

    return cluster << s->cluster_bits;
}
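
/*
 * alloc_clusters_imrt() is only used by the rebuild code below, where the
 * on-disk refcounts cannot be trusted and the in-memory table is the only
 * authoritative allocation map.
 */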
/*
 * Creates a new refcount structure based solely on the in-memory information
 * given through *refcount_table. All necessary allocations will be reflected
 * in that array.
 *
 * On success, the old refcount structure is leaked (it will be covered by the
 * new refcount structure).
 */
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      uint16_t **refcount_table,
                                      int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    uint16_t *on_disk_refblock;
    int i, ret = 0;
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);

write_refblocks:
    for (; cluster < *nb_clusters; cluster++) {
        if (!(*refcount_table)[cluster]) {
            continue;
        }

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;

        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        }
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters, &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;
            goto fail;
        }

        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;

            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                 reftable_size *
                                                 sizeof(uint64_t));
            if (!new_on_disk_reftable) {
                res->check_errors++;
                ret = -ENOMEM;
                goto fail;
            }
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;
        }
        on_disk_reftable[refblock_index] = refblock_offset;

        /* If this is apparently the last refblock (for now), try to squeeze the
         * reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
        {
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
                                                          sizeof(uint64_t));
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;
                goto fail;
            }
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        on_disk_refblock = qemu_blockalign0(bs->file, s->cluster_size);
        for (i = 0; i < s->refcount_block_size &&
                    refblock_start + i < *nb_clusters; i++)
        {
            on_disk_refblock[i] =
                cpu_to_be16((*refcount_table)[refblock_start + i]);
        }

        ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
                         (void *)on_disk_refblock, s->cluster_sectors);
        qemu_vfree(on_disk_refblock);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;
    }
    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        }
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        goto write_refblocks;
    }
    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    /* Enter new reftable into the image header */
    cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
                 reftable_offset);
    cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
                 size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader,
                                              refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
    if (ret < 0) {
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
        goto fail;
    }

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}
/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    uint16_t *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), this function is immediately called again, in which case the
     * result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);
    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,
                                         &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        res->corruptions = 0;
        res->leaks = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        rebuild = false;
        memset(refcount_table, 0, nb_clusters * sizeof(uint16_t));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
                                  &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
            if (rebuild) {
                fprintf(stderr, "ERROR rebuilt refcount structure is still "
                        "broken\n");
            }

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;
            *res = saved_res;
        }

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        }
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        }
        res->leaks += fresh_leaks;
    } else if (fix) {
        if (rebuild) {
            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
            res->check_errors++;
            ret = -EIO;
            goto fail;
        }

        if (res->leaks || res->corruptions) {
            *res = pre_compare_res;
            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                              refcount_table, nb_clusters);
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}
#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)
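
/* Note that overlaps_with() implicitly uses the offset and size variables of
 * the enclosing function scope as the range under test. */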
/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (being a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_read failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }
    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        for (i = 0; i < s->refcount_table_size; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz  = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
            uint64_t *l1 = g_try_malloc(l1_sz2);
            int ret;

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}
static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
};
/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ffs(ret) - 1;
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}