/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"
#include "qemu/bswap.h"
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length, uint64_t addend,
                            bool decrease, enum qcow2_discard_type type);

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index);

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value);
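
/*
 * One get/set pair exists per refcount entry width: get/set_refcount_roN
 * handles entries of 2^N bits, from ro0 (eight 1-bit refcounts per byte) up
 * to ro6 (one big-endian 64-bit word per entry). The image header's
 * refcount_order (4 by default, i.e. 16-bit entries) selects the pair via
 * the function tables below.
 */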
static Qcow2GetRefcountFunc *const get_refcount_funcs[] = {
    &get_refcount_ro0,
    &get_refcount_ro1,
    &get_refcount_ro2,
    &get_refcount_ro3,
    &get_refcount_ro4,
    &get_refcount_ro5,
    &get_refcount_ro6
};

static Qcow2SetRefcountFunc *const set_refcount_funcs[] = {
    &set_refcount_ro0,
    &set_refcount_ro1,
    &set_refcount_ro2,
    &set_refcount_ro3,
    &set_refcount_ro4,
    &set_refcount_ro5,
    &set_refcount_ro6
};

/*********************************************************/
/* refcount handling */

static void update_max_refcount_table_index(BDRVQcow2State *s)
{
    unsigned i = s->refcount_table_size - 1;
    while (i > 0 && (s->refcount_table[i] & REFT_OFFSET_MASK) == 0) {
        i--;
    }
    /* Set s->max_refcount_table_index to the index of the last used entry */
    s->max_refcount_table_index = i;
}
int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_order >= 0 && s->refcount_order <= 6);
    s->get_refcount = get_refcount_funcs[s->refcount_order];
    s->set_refcount = set_refcount_funcs[s->refcount_order];

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->refcount_table_size; i++) {
            be64_to_cpus(&s->refcount_table[i]);
        }
        update_max_refcount_table_index(s);
    }
    return 0;
 fail:
    return ret;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    g_free(s->refcount_table);
}
static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 8] >> (index % 8)) & 0x1;
}

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 1));
    ((uint8_t *)refcount_array)[index / 8] &= ~(0x1 << (index % 8));
    ((uint8_t *)refcount_array)[index / 8] |= value << (index % 8);
}
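
/*
 * Worked example for the sub-byte widths: with 1-bit entries, the refcount
 * for cluster 21 lives in byte 21 / 8 = 2 at bit 21 % 8 = 5; set_refcount_ro0
 * therefore clears that single bit and ORs the new value back in. ro1 and ro2
 * do the same with 2-bit and 4-bit fields.
 */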
static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 4] >> (2 * (index % 4)))
           & 0x3;
}

static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 2));
    ((uint8_t *)refcount_array)[index / 4] &= ~(0x3 << (2 * (index % 4)));
    ((uint8_t *)refcount_array)[index / 4] |= value << (2 * (index % 4));
}

static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 2] >> (4 * (index % 2)))
           & 0xf;
}

static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 4));
    ((uint8_t *)refcount_array)[index / 2] &= ~(0xf << (4 * (index % 2)));
    ((uint8_t *)refcount_array)[index / 2] |= value << (4 * (index % 2));
}

static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index)
{
    return ((const uint8_t *)refcount_array)[index];
}

static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 8));
    ((uint8_t *)refcount_array)[index] = value;
}

static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index)
{
    return be16_to_cpu(((const uint16_t *)refcount_array)[index]);
}

static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 16));
    ((uint16_t *)refcount_array)[index] = cpu_to_be16(value);
}

static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index)
{
    return be32_to_cpu(((const uint32_t *)refcount_array)[index]);
}

static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 32));
    ((uint32_t *)refcount_array)[index] = cpu_to_be32(value);
}

static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index)
{
    return be64_to_cpu(((const uint64_t *)refcount_array)[index]);
}

static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    ((uint64_t *)refcount_array)[index] = cpu_to_be64(value);
}
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcow2State *s = bs->opaque;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    return qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                           refcount_block);
}

/*
 * Retrieves the refcount of the cluster given by its index and stores it in
 * *refcount. Returns 0 on success and -errno on failure.
 */
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
                       uint64_t *refcount)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    void *refcount_block;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        *refcount = 0;
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        *refcount = 0;
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    *refcount = s->get_refcount(refcount_block, block_index);

    qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);

    return 0;
}
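
/*
 * The lookup above is two-level: the upper bits of the cluster index select
 * the reftable entry, the lower refcount_block_bits select the slot within
 * that refblock. With the defaults (64 KiB clusters, 16-bit refcounts) a
 * refblock holds 32768 entries, so a single refblock describes 2 GiB worth
 * of clusters.
 */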
/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
static unsigned int next_refcount_table_size(BDRVQcow2State *s,
                                             unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}
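
/*
 * cluster_bits - 3 converts between clusters and 8-byte reftable entries (a
 * 64 KiB cluster holds 8192 entries), and (x * 3 + 1) / 2 grows the table by
 * roughly 1.5x per step, so repeated refblock allocations cause only
 * logarithmically many reftable rewrites.
 */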
/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcow2State *s, uint64_t offset_a,
                                  uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}
/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
                                int64_t cluster_index, void **refcount_block)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       refcount_block);
        }
    }

    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount()
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
            " at %" PRIx64 "\n",
            refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif
    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            (s->refcount_block_size - 1);
        s->set_refcount(*refcount_block, block_index, 1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1, false,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }
    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;
        /* If there's a hole in s->refcount_table then it can happen
         * that refcount_table_index < s->max_refcount_table_index */
        s->max_refcount_table_index =
            MAX(s->max_refcount_table_index, refcount_table_index);

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far; this will be the
     * basis for calculating the index of the first cluster used for the
     * self-describing refcount structures which we are about to create.
     *
     * Because we reached this point, there cannot be any refcount entries for
     * cluster_index or higher indices yet. However, because new_block has been
     * allocated to describe that cluster (and it will assume this role later
     * on), we cannot use that index; also, new_block may actually have a higher
     * cluster index than cluster_index, so it needs to be taken into account
     * here (and 1 needs to be added to its value because that cluster is used).
     */
    uint64_t blocks_used = DIV_ROUND_UP(MAX(cluster_index + 1,
                                            (new_block >> s->cluster_bits) + 1),
                                        s->refcount_block_size);

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            DIV_ROUND_UP(table_clusters, s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            DIV_ROUND_UP(meta_clusters, s->refcount_block_size));

    } while (last_table_size != table_size);

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
            s->refcount_table_size, table_size);
#endif
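
    /*
     * The loop above iterates to a fixed point: the new reftable and its
     * refblocks are themselves metadata that must be covered by refcount
     * blocks, which in turn may require a larger table. table_size is
     * recomputed until it no longer changes, which converges because each
     * iteration can only grow the table.
     */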
    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    void *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {
        ret = -ENOMEM;
        goto fail_table;
    }

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        s->set_refcount(new_blocks, block++, 1);
    }
    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
                           blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    new_blocks = NULL;
    if (ret < 0) {
        goto fail_table;
    }

    /* Write refcount table to disk */
    for (i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
                           table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for (i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }
    /* Hook up the new refcount table in the qcow2 header */
    struct QEMU_PACKED {
        uint64_t d64;
        uint32_t d32;
    } data;
    data.d64 = cpu_to_be64(table_offset);
    data.d32 = cpu_to_be32(table_clusters);
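    /* refcount_table_offset and refcount_table_clusters are adjacent in
     * QCowHeader, so the packed struct updates both fields with one write.
     * Everything the new table references has already been written and
     * synced above, so the image points at a consistent refcount structure
     * both before and after this header update. */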
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, refcount_table_offset),
                           &data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;
    update_max_refcount_table_index(s);

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, refcount_block);
    if (ret < 0) {
        return ret;
    }

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;
fail_table:
    g_free(new_blocks);
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
    }
    return ret;
}
void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_pdiscard(bs->file->bs, d->offset, d->bytes);
        }

        g_free(d);
    }
}
static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
               || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
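
/*
 * The merge test above relies on span arithmetic: for ranges [a, a+la) and
 * [b, b+lb), MAX(end) - MIN(start) <= la + lb holds exactly when the two
 * ranges touch or overlap. Since clusters whose refcount dropped to zero can
 * never be freed twice, true overlap is impossible here, so the ranges must
 * be exactly adjacent; the asserts check that equality.
 */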
/* XXX: cache several refcount block clusters ? */
/* @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                                                   int64_t offset,
                                                   int64_t length,
                                                   uint64_t addend,
                                                   bool decrease,
                                                   enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t start, last, cluster_offset;
    void *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64
            " addend=%s%" PRIu64 "\n", offset, length, decrease ? "-" : "",
            addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (decrease) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
                                   s->l2_table_cache);
    }
    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size)
    {
        int block_index;
        uint64_t refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
            }
            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
                                     refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);

        refcount = s->get_refcount(refcount_block, block_index);
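        /* Unsigned wraparound doubles as the range check here: on decrease,
         * refcount - addend > refcount holds exactly when addend > refcount
         * (underflow); on increase, refcount + addend < refcount catches
         * 64-bit overflow before the refcount_max comparison. */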
        if (decrease ? (refcount - addend > refcount)
                     : (refcount + addend < refcount ||
                        refcount + addend > s->refcount_max))
        {
            ret = -EINVAL;
            goto fail;
        }
        if (decrease) {
            refcount -= addend;
        } else {
            refcount += addend;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->set_refcount(refcount_block, block_index, refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);
        }
    }
    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, addend,
                                !decrease, QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}
/*
 * Increases or decreases the refcount of a given cluster.
 *
 * @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added.
 *
 * On success 0 is returned; on failure -errno is returned.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  uint64_t addend, bool decrease,
                                  enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          decrease, type);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*********************************************************/
/* cluster allocation functions */

/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t i, nb_clusters, refcount;
    int ret;

    /* We can't allocate clusters if they may still be queued for discard. */
    if (s->cache_discards) {
        qcow2_process_discards(bs, 0);
    }

    nb_clusters = size_to_clusters(s, size);
retry:
    for (i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        ret = qcow2_get_refcount(bs, next_cluster_index, &refcount);

        if (ret < 0) {
            return ret;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in an int64_t */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
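
/*
 * free_cluster_index is a monotonically advancing first-fit hint: whenever a
 * scanned cluster turns out to be in use, the search restarts from the next
 * index, so on success the nb_clusters clusters preceding free_cluster_index
 * are guaranteed to be contiguous and free.
 */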
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}
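
/*
 * The -EAGAIN loop ties in with alloc_refcount_block(): if updating the
 * refcounts had to allocate new refcount metadata, that metadata may occupy
 * the very clusters just picked, so the whole search is simply restarted.
 */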
int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
                                int64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t cluster_index, refcount;
    uint64_t i;
    int ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for (i = 0; i < nb_clusters; i++) {
            ret = qcow2_get_refcount(bs, cluster_index++, &refcount);
            if (ret < 0) {
                return ret;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1, false,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;
    size_t free_in_cluster;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));

    offset = s->free_byte_offset;

    if (offset) {
        uint64_t refcount;
        ret = qcow2_get_refcount(bs, offset >> s->cluster_bits, &refcount);
        if (ret < 0) {
            return ret;
        }

        if (refcount == s->refcount_max) {
            offset = 0;
        }
    }

    free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
    do {
        if (!offset || free_in_cluster < size) {
            int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
            if (new_cluster < 0) {
                return new_cluster;
            }

            if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
                offset = new_cluster;
                free_in_cluster = s->cluster_size;
            } else {
                free_in_cluster += s->cluster_size;
            }
        }

        assert(offset);
        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            offset = 0;
        }
    } while (ret == -EAGAIN);
    if (ret < 0) {
        return ret;
    }

    /* The cluster refcount was incremented; refcount blocks must be flushed
     * before the caller's L2 table updates. */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);

    s->free_byte_offset = offset + size;
    if (!offset_into_cluster(s, s->free_byte_offset)) {
        s->free_byte_offset = 0;
    }

    return offset;
}
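
/*
 * Note the bookkeeping this relies on: every sub-cluster allocation above
 * bumps the refcount of its containing cluster by one, so a cluster shared
 * by several compressed writes carries a refcount equal to the number of
 * allocations in it (hence the refcount_max check before reusing the tail
 * cluster tracked in free_byte_offset).
 */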
void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, 1, true, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}
/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO:
        if (l2_entry & L2E_OFFSET_MASK) {
            if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
                qcow2_signal_corruption(bs, false, -1, -1,
                                        "Cannot free unaligned cluster %#llx",
                                        l2_entry & L2E_OFFSET_MASK);
            } else {
                qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                    nb_clusters << s->cluster_bits, type);
            }
        }
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}
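
/*
 * Compressed L2 entries encode the host offset with 512-byte granularity and
 * the number of occupied 512-byte sectors in the csize bit field, which is
 * why the offset is masked with ~511 and the length computed as
 * nb_csectors * 512 rather than in whole clusters.
 */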

/*********************************************************/
/* snapshots and image creation */

/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, refcount;
    bool l1_allocated = false;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors;
    int ret;

    assert(addend >= -1 && addend <= 1);

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void **) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                old_offset = offset;
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((offset >> s->csize_shift) &
                                   s->csize_mask) + 1;
                    if (addend != 0) {
                        ret = update_refcount(bs,
                            (offset & s->cluster_offset_mask) & ~511,
                            nb_csectors * 512, abs(addend), addend < 0,
                            QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }
                    /* compressed clusters are never modified */
                    refcount = 2;
                    break;
                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO:
                    if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Data "
                                                "cluster offset %#llx "
                                                "unaligned (L2 offset: %#"
                                                PRIx64 ", L2 index: %#x)",
                                                offset & L2E_OFFSET_MASK,
                                                l2_offset, j);
                        ret = -EIO;
                        goto fail;
                    }

                    cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                    if (!cluster_index) {
                        /* unallocated */
                        refcount = 0;
                        break;
                    }
                    if (addend != 0) {
                        ret = qcow2_update_cluster_refcount(bs,
                                cluster_index, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }

                    ret = qcow2_get_refcount(bs, cluster_index, &refcount);
                    if (ret < 0) {
                        goto fail;
                    }
                    break;

                case QCOW2_CLUSTER_UNALLOCATED:
                    refcount = 0;
                    break;

                default:
                    abort();
                }
                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                }
                if (offset != old_offset) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                                   s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache,
                                                 l2_table);
                }
            }

            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

            if (addend != 0) {
                ret = qcow2_update_cluster_refcount(bs, l2_offset >>
                                                        s->cluster_bits,
                                                    abs(addend), addend < 0,
                                                    QCOW2_DISCARD_SNAPSHOT);
                if (ret < 0) {
                    goto fail;
                }
            }
            ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                     &refcount);
            if (ret < 0) {
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }
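    /*
     * QCOW_OFLAG_COPIED caches "refcount == 1" in L1/L2 entries: only such
     * clusters may be written in place. Since the loop above just changed
     * refcounts, the flag is recomputed for every entry and rewritten when it
     * no longer matches.
     */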
    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset,
                               l1_table, l1_size2);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated) {
        g_free(l1_table);
    }
    return ret;
}

/*********************************************************/
/* refcount checking functions */

static uint64_t refcount_array_byte_size(BDRVQcow2State *s, uint64_t entries)
{
    /* This assertion holds because there is no way we can address more than
     * 2^(64 - 9) clusters at once (with cluster size 512 = 2^9, and because
     * offsets have to be representable in bytes); due to every cluster
     * corresponding to one refcount entry, we are well below that limit */
    assert(entries < (UINT64_C(1) << (64 - 9)));

    /* Thanks to the assertion this will not overflow, because
     * s->refcount_order < 7.
     * (note: x << s->refcount_order == x * s->refcount_bits) */
    return DIV_ROUND_UP(entries << s->refcount_order, 8);
}
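
/*
 * For example, 1000 entries at the default refcount_order of 4 take
 * 1000 * 16 bits = 2000 bytes, while with refcount_order 0 the same entries
 * fit in DIV_ROUND_UP(1000, 8) = 125 bytes.
 */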
/*
 * Reallocates *array so that it can hold new_size entries. *size must contain
 * the current number of entries in *array. If the reallocation fails, *array
 * and *size will not be modified and -errno will be returned. If the
 * reallocation is successful, *array will be set to the new buffer, *size
 * will be set to new_size and 0 will be returned. The size of the reallocated
 * refcount array buffer will be aligned to a cluster boundary, and the newly
 * allocated area will be zeroed.
 */
static int realloc_refcount_array(BDRVQcow2State *s, void **array,
                                  int64_t *size, int64_t new_size)
{
    int64_t old_byte_size, new_byte_size;
    void *new_ptr;

    /* Round to clusters so the array can be directly written to disk */
    old_byte_size = size_to_clusters(s, refcount_array_byte_size(s, *size))
                    * s->cluster_size;
    new_byte_size = size_to_clusters(s, refcount_array_byte_size(s, new_size))
                    * s->cluster_size;

    if (new_byte_size == old_byte_size) {
        *size = new_size;
        return 0;
    }

    assert(new_byte_size > 0);

    if (new_byte_size > SIZE_MAX) {
        return -ENOMEM;
    }

    new_ptr = g_try_realloc(*array, new_byte_size);
    if (!new_ptr) {
        return -ENOMEM;
    }

    if (new_byte_size > old_byte_size) {
        memset((char *)new_ptr + old_byte_size, 0,
               new_byte_size - old_byte_size);
    }

    *array = new_ptr;
    *size = new_size;

    return 0;
}
/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         void **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, last, cluster_offset, k, refcount;
    int ret;

    if (size <= 0) {
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            ret = realloc_refcount_array(s, refcount_table,
                                         refcount_table_size, k + 1);
            if (ret < 0) {
                res->check_errors++;
                return ret;
            }
        }

        refcount = s->get_refcount(*refcount_table, k);
        if (refcount == s->refcount_max) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            fprintf(stderr, "Use qemu-img amend to increase the refcount entry "
                    "width or qemu-img convert to create a clean copy if the "
                    "image cannot be opened for writing\n");
            res->corruptions++;
            continue;
        }
        s->set_refcount(*refcount_table, k, refcount + 1);
    }

    return 0;
}
/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};
/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size, int64_t l2_offset,
                              int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        goto fail;
    }

    /* Do the actual checks */
    for (i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);
            if (ret < 0) {
                goto fail;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {
                break;
            }
            /* fall through */

        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    g_free(l2_table);
    return ret;
}
/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);
    if (ret < 0) {
        goto fail;
    }

    /* Read L1 table entries from disk */
    if (l1_size2 > 0) {
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            ret = -ENOMEM;
            res->check_errors++;
            goto fail;
        }
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
            goto fail;
        }
        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }

    /* Do the actual checks */
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                        "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    g_free(l1_table);
    return ret;
}
/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * have been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    uint64_t refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &refcount);
        if (ret < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                ret = qcow2_get_refcount(bs,
                                         data_offset >> s->cluster_bits,
                                         &refcount);
                if (ret < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table,
                              s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}
/*
 * Checks consistency of refblocks and accounts for each refblock in
 * *refcount_table.
 */
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix, bool *rebuild,
                           void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i, size;
    int ret;

    for (i = 0; i < s->refcount_table_size; i++) {
        uint64_t offset, cluster;
        offset = s->refcount_table[i];
        cluster = offset >> s->cluster_bits;

        /* Refcount blocks are cluster aligned */
        if (offset_into_cluster(s, offset)) {
            fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
                "cluster aligned; refcount table entry corrupted\n", i);
            res->corruptions++;
            *rebuild = true;
            continue;
        }

        if (cluster >= *nb_clusters) {
            fprintf(stderr, "%s refcount block %" PRId64 " is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);

            if (fix & BDRV_FIX_ERRORS) {
                int64_t new_nb_clusters;

                if (offset > INT64_MAX - s->cluster_size) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                ret = bdrv_truncate(bs->file, offset + s->cluster_size);
                if (ret < 0) {
                    goto resize_fail;
                }
                size = bdrv_getlength(bs->file->bs);
                if (size < 0) {
                    ret = size;
                    goto resize_fail;
                }

                new_nb_clusters = size_to_clusters(s, size);
                assert(new_nb_clusters >= *nb_clusters);

                ret = realloc_refcount_array(s, refcount_table,
                                             nb_clusters, new_nb_clusters);
                if (ret < 0) {
                    res->check_errors++;
                    return ret;
                }

                if (cluster >= *nb_clusters) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                res->corruptions_fixed++;
                ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                    offset, s->cluster_size);
                if (ret < 0) {
                    return ret;
                }
                /* No need to check whether the refcount is now greater than 1:
                 * This area was just allocated and zeroed, so it can only be
                 * exactly 1 after inc_refcounts() */
                continue;

resize_fail:
                res->corruptions++;
                *rebuild = true;
                fprintf(stderr, "ERROR could not resize image: %s\n",
                        strerror(-ret));
            } else {
                res->corruptions++;
            }
            continue;
        }

        if (offset != 0) {
            ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                offset, s->cluster_size);
            if (ret < 0) {
                return ret;
            }
            if (s->get_refcount(*refcount_table, cluster) != 1) {
                fprintf(stderr, "ERROR refcount block %" PRId64
                        " refcount=%" PRIu64 "\n", i,
                        s->get_refcount(*refcount_table, cluster));
                res->corruptions++;
                *rebuild = true;
            }
        }
    }

    return 0;
}
/*
 * Calculates an in-memory refcount table.
 */
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        int64_t old_size = 0;
        ret = realloc_refcount_array(s, refcount_table,
                                     &old_size, *nb_clusters);
        if (ret < 0) {
            res->check_errors++;
            return ret;
        }
    }

    /* header */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            return ret;
        }
    }
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}
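
/*
 * Taken together, the walk above accounts for every piece of reachable
 * metadata exactly once: the header cluster, the active L1/L2 tables, each
 * snapshot's L1/L2 tables, the snapshot table, and the reftable plus its
 * refblocks (via check_refblocks). Any cluster the image references must end
 * up with a nonzero count in the in-memory table.
 */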
/*
 * Compares the actual reference count for each cluster in the image against
 * the refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              void *refcount_table, int64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i;
    uint64_t refcount1, refcount2;
    int ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        ret = qcow2_get_refcount(bs, i, &refcount1);
        if (ret < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-ret));
            res->check_errors++;
            continue;
        }

        refcount2 = s->get_refcount(refcount_table, i);

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%" PRIu64
                    " reference=%" PRIu64 "\n",
                    num_fixed != NULL ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" : "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount_diff(refcount1, refcount2),
                                      refcount1 > refcount2,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}
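
/*
 * The asymmetry above encodes severity: an on-disk refcount that is too high
 * (refcount1 > refcount2) merely leaks space and is safe to leave; one that
 * is too low risks a referenced cluster being reallocated while still in use,
 * so it counts as corruption. refcount1 == 0 for a referenced cluster means
 * the refblock itself is missing or broken, which forces a full rebuild.
 */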
1934 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
1935 * the on-disk refcount structures.
1937 * On input, *first_free_cluster tells where to start looking, and need not
1938 * actually be a free cluster; the returned offset will not be before that
1939 * cluster. On output, *first_free_cluster points to the first gap found, even
1940 * if that gap was too small to be used as the returned offset.
1942 * Note that *first_free_cluster is a cluster index whereas the return value is
1943 * an offset.
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   int cluster_count,
                                   void **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;
    int ret;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * contiguous free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
         cluster++)
    {
        if (!s->get_refcount(*refcount_table, cluster)) {
            contiguous_free_clusters++;
            if (first_gap) {
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
                first_gap = false;
            }
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;
        }
    }

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of contiguous free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly, so the needed free clusters can be appended at the end of
     * the image */
    if (contiguous_free_clusters < cluster_count) {
        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        ret = realloc_refcount_array(s, refcount_table, imrt_nb_clusters,
                                     cluster + cluster_count
                                     - contiguous_free_clusters);
        if (ret < 0) {
            return ret;
        }
    }

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        s->set_refcount(*refcount_table, cluster + i, 1);
    }

    return cluster << s->cluster_bits;
}

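/*
 * Illustrative usage sketch (not part of the driver; the variable names are
 * hypothetical): the return value is a byte offset, so a caller converting
 * back to a cluster index has to shift by s->cluster_bits.
 *
 *     int64_t offset = alloc_clusters_imrt(bs, 1, &refcount_table,
 *                                          &nb_clusters, &first_free_cluster);
 *     if (offset < 0) {
 *         return offset;                        // -errno on failure
 *     }
 *     int64_t cluster_index = offset >> s->cluster_bits;
 */
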
/*
 * Creates a new refcount structure based solely on the in-memory information
 * given through *refcount_table. All necessary allocations will be reflected
 * in that array.
 *
 * On success, the old refcount structure is leaked (it will be covered by the
 * new refcount structure).
 */
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      void **refcount_table,
                                      int64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    void *on_disk_refblock;
    int ret = 0;
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);

write_refblocks:
    for (; cluster < *nb_clusters; cluster++) {
        if (!s->get_refcount(*refcount_table, cluster)) {
            continue;
        }

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;

        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        }
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters, &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;
            goto fail;
        }

        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;

            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                 reftable_size *
                                                 sizeof(uint64_t));
            if (!new_on_disk_reftable) {
                res->check_errors++;
                ret = -ENOMEM;
                goto fail;
            }
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;
        }
        on_disk_reftable[refblock_index] = refblock_offset;

        /* If this is apparently the last refblock (for now), try to squeeze the
         * reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
        {
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
                                                          sizeof(uint64_t));
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;
                goto fail;
            }
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* The size of *refcount_table is always cluster-aligned, therefore the
         * write operation will not overflow */
        on_disk_refblock = (void *)((char *) *refcount_table +
                                    refblock_index * s->cluster_size);

        ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
                         on_disk_refblock, s->cluster_sectors);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;
    }

    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        }
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        goto write_refblocks;
    }

    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    /* Enter new reftable into the image header */
    reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
    reftable_offset_and_clusters.reftable_clusters =
        cpu_to_be32(size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
    if (ret < 0) {
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
        goto fail;
    }

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;
    update_max_refcount_table_index(s);

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}

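/*
 * Sizing note (illustrative, assuming the common 64 KiB cluster size with
 * 16-bit refcounts): one refblock occupies one cluster and holds
 * cluster_size * 8 / refcount_bits = 65536 * 8 / 16 = 32768 entries, so a
 * single refblock describes 32768 * 64 KiB = 2 GiB of image data; one
 * reftable cluster holds 65536 / 8 = 8192 refblock pointers.
 */
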
/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    void *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file->bs);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), this function is immediately called again, in which case the
     * result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);

    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,
                                         &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        res->corruptions = 0;
        res->leaks = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        rebuild = false;
        memset(refcount_table, 0, refcount_array_byte_size(s, nb_clusters));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
                                  &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
            if (rebuild) {
                fprintf(stderr, "ERROR rebuilt refcount structure is still "
                        "broken\n");
            }

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;
            *res = saved_res;
        }

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        }
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        }
        res->leaks += fresh_leaks;
    } else if (fix) {
        if (rebuild) {
            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
            res->check_errors++;
            ret = -EIO;
            goto fail;
        }

        if (res->leaks || res->corruptions) {
            *res = pre_compare_res;
            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                              refcount_table, nb_clusters);
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}

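/*
 * In short (a summary of the function above, for orientation): the check
 * first recomputes all refcounts into an in-memory table, then compares them
 * against the on-disk values; depending on the fix mode it either rebuilds
 * the refcount structure from scratch, repairs individual mismatches, or
 * merely reports them.
 */
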
#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)

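/* Note: overlaps_with() deliberately refers to the `offset` and `size`
 * locals of the enclosing function, so it is only meaningful inside
 * qcow2_check_metadata_overlap() below. */
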
/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                          s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                              s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        unsigned last_entry = s->max_refcount_table_index;
        assert(last_entry < s->refcount_table_size);
        assert(last_entry + 1 == s->refcount_table_size ||
               (s->refcount_table[last_entry + 1] & REFT_OFFSET_MASK) == 0);
        for (i = 0; i <= last_entry; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
            uint64_t *l1 = g_try_malloc(l1_sz2);
            int ret;

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}

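/*
 * Illustrative usage sketch (hypothetical caller code, not taken from this
 * file): a caller about to rewrite an L2 table could skip the active-L2
 * check for its own range by passing it in the ign bitmask:
 *
 *     int ret = qcow2_check_metadata_overlap(bs, QCOW2_OL_ACTIVE_L2,
 *                                            l2_offset, s->cluster_size);
 *     if (ret > 0) {
 *         // some other metadata section overlaps the target range
 *     }
 */
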
static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR] = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR] = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR] = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR] = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR] = "inactive L2 table",
};

/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ctz32(ret);
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}

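/*
 * Typical call pattern (as used by flush_refblock() and
 * rebuild_refcount_structure() in this file): run the overlap check
 * immediately before every metadata write and abort the write on any
 * negative return value.
 *
 *     ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
 *     if (ret < 0) {
 *         return ret;   // on -EIO the image has already been marked corrupt
 *     }
 *     ret = bdrv_pwrite(bs->file, offset, buf, s->cluster_size);
 */
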
/* A pointer to a function of this type is given to walk_over_reftable(). That
 * function will create refblocks and pass them to a RefblockFinishOp once they
 * are completed (@refblock). @refblock_empty is set if the refblock is
 * completely empty.
 *
 * Along with the refblock, a corresponding reftable entry is passed, in the
 * reftable @reftable (which may be reallocated) at @reftable_index.
 *
 * @allocated should be set to true if a new cluster has been allocated.
 */
typedef int (RefblockFinishOp)(BlockDriverState *bs, uint64_t **reftable,
                               uint64_t reftable_index, uint64_t *reftable_size,
                               void *refblock, bool refblock_empty,
                               bool *allocated, Error **errp);

/*
 * This "operation" for walk_over_reftable() allocates the refblock on disk (if
 * it is not empty) and inserts its offset into the new reftable. The size of
 * this new reftable is increased as required.
 */
static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
                          uint64_t reftable_index, uint64_t *reftable_size,
                          void *refblock, bool refblock_empty, bool *allocated,
                          Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;

    if (!refblock_empty && reftable_index >= *reftable_size) {
        uint64_t *new_reftable;
        uint64_t new_reftable_size;

        new_reftable_size = ROUND_UP(reftable_index + 1,
                                     s->cluster_size / sizeof(uint64_t));
        if (new_reftable_size > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
            error_setg(errp,
                       "This operation would make the refcount table grow "
                       "beyond the maximum size supported by QEMU, aborting");
            return -ENOTSUP;
        }

        new_reftable = g_try_realloc(*reftable, new_reftable_size *
                                     sizeof(uint64_t));
        if (!new_reftable) {
            error_setg(errp, "Failed to increase reftable buffer size");
            return -ENOMEM;
        }

        memset(new_reftable + *reftable_size, 0,
               (new_reftable_size - *reftable_size) * sizeof(uint64_t));

        *reftable = new_reftable;
        *reftable_size = new_reftable_size;
    }

    if (!refblock_empty && !(*reftable)[reftable_index]) {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            error_setg_errno(errp, -offset, "Failed to allocate refblock");
            return offset;
        }
        (*reftable)[reftable_index] = offset;
        *allocated = true;
    }

    return 0;
}

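/*
 * Worked example for the ROUND_UP above (illustrative, assuming 64 KiB
 * clusters): the reftable grows in whole clusters, i.e. in steps of
 * cluster_size / sizeof(uint64_t) = 65536 / 8 = 8192 entries; the first
 * refblock with reftable_index 8192 therefore grows the buffer from 8192
 * to 16384 entries.
 */
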
/*
 * This "operation" for walk_over_reftable() writes the refblock to disk at the
 * offset specified by the new reftable's entry. It does not modify the new
 * reftable or change any refcounts.
 */
static int flush_refblock(BlockDriverState *bs, uint64_t **reftable,
                          uint64_t reftable_index, uint64_t *reftable_size,
                          void *refblock, bool refblock_empty, bool *allocated,
                          Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;
    int ret;

    if (reftable_index < *reftable_size && (*reftable)[reftable_index]) {
        offset = (*reftable)[reftable_index];

        ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Overlap check failed");
            return ret;
        }

        ret = bdrv_pwrite(bs->file, offset, refblock, s->cluster_size);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to write refblock");
            return ret;
        }
    } else {
        assert(refblock_empty);
    }

    return 0;
}

/*
 * This function walks over the existing reftable and every referenced refblock;
 * if @new_set_refcount is non-NULL, it is called for every refcount entry to
 * create an equal new entry in the passed @new_refblock. Once that
 * @new_refblock is completely filled, @operation will be called.
 *
 * @status_cb and @cb_opaque are used for the amend operation's status callback.
 * @index is the index of the walk_over_reftable() calls and @total is the total
 * number of walk_over_reftable() calls per amend operation. Both are used for
 * calculating the parameters for the status callback.
 *
 * @allocated is set to true if a new cluster has been allocated.
 */
static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
                              uint64_t *new_reftable_index,
                              uint64_t *new_reftable_size,
                              void *new_refblock, int new_refblock_size,
                              int new_refcount_bits,
                              RefblockFinishOp *operation, bool *allocated,
                              Qcow2SetRefcountFunc *new_set_refcount,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque, int index, int total,
                              Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t reftable_index;
    bool new_refblock_empty = true;
    int refblock_index;
    int new_refblock_index = 0;
    int ret;

    for (reftable_index = 0; reftable_index < s->refcount_table_size;
         reftable_index++)
    {
        uint64_t refblock_offset = s->refcount_table[reftable_index]
                                 & REFT_OFFSET_MASK;

        status_cb(bs, (uint64_t)index * s->refcount_table_size + reftable_index,
                  (uint64_t)total * s->refcount_table_size, cb_opaque);

        if (refblock_offset) {
            void *refblock;

            if (offset_into_cluster(s, refblock_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: %#"
                                        PRIx64 ")", refblock_offset,
                                        reftable_index);
                error_setg(errp,
                           "Image is corrupt (unaligned refblock offset)");
                return -EIO;
            }

            ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offset,
                                  &refblock);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Failed to retrieve refblock");
                return ret;
            }

            for (refblock_index = 0; refblock_index < s->refcount_block_size;
                 refblock_index++)
            {
                uint64_t refcount;

                if (new_refblock_index >= new_refblock_size) {
                    /* new_refblock is now complete */
                    ret = operation(bs, new_reftable, *new_reftable_index,
                                    new_reftable_size, new_refblock,
                                    new_refblock_empty, allocated, errp);
                    if (ret < 0) {
                        qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
                        return ret;
                    }

                    (*new_reftable_index)++;
                    new_refblock_index = 0;
                    new_refblock_empty = true;
                }

                refcount = s->get_refcount(refblock, refblock_index);
                if (new_refcount_bits < 64 && refcount >> new_refcount_bits) {
                    uint64_t offset;

                    qcow2_cache_put(bs, s->refcount_block_cache, &refblock);

                    offset = ((reftable_index << s->refcount_block_bits)
                              + refblock_index) << s->cluster_bits;

                    error_setg(errp, "Cannot decrease refcount entry width to "
                               "%i bits: Cluster at offset %#" PRIx64 " has a "
                               "refcount of %" PRIu64, new_refcount_bits,
                               offset, refcount);
                    return -EINVAL;
                }

                if (new_set_refcount) {
                    new_set_refcount(new_refblock, new_refblock_index++,
                                     refcount);
                } else {
                    new_refblock_index++;
                }
                new_refblock_empty = new_refblock_empty && refcount == 0;
            }

            qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
        } else {
            /* No refblock means every refcount is 0 */
            for (refblock_index = 0; refblock_index < s->refcount_block_size;
                 refblock_index++)
            {
                if (new_refblock_index >= new_refblock_size) {
                    /* new_refblock is now complete */
                    ret = operation(bs, new_reftable, *new_reftable_index,
                                    new_reftable_size, new_refblock,
                                    new_refblock_empty, allocated, errp);
                    if (ret < 0) {
                        return ret;
                    }

                    (*new_reftable_index)++;
                    new_refblock_index = 0;
                    new_refblock_empty = true;
                }

                if (new_set_refcount) {
                    new_set_refcount(new_refblock, new_refblock_index++, 0);
                } else {
                    new_refblock_index++;
                }
            }
        }
    }

    if (new_refblock_index > 0) {
        /* Complete the potentially existing partially filled final refblock */
        if (new_set_refcount) {
            for (; new_refblock_index < new_refblock_size;
                 new_refblock_index++)
            {
                new_set_refcount(new_refblock, new_refblock_index, 0);
            }
        }

        ret = operation(bs, new_reftable, *new_reftable_index,
                        new_reftable_size, new_refblock, new_refblock_empty,
                        allocated, errp);
        if (ret < 0) {
            return ret;
        }

        (*new_reftable_index)++;
    }

    status_cb(bs, (uint64_t)(index + 1) * s->refcount_table_size,
              (uint64_t)total * s->refcount_table_size, cb_opaque);

    return 0;
}

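/*
 * qcow2_change_refcount_order() below runs this walk in two kinds of passes:
 * first with alloc_refblock() (and no @new_set_refcount) until no new
 * allocation occurs, then once with flush_refblock() to actually fill and
 * write the new refblocks.
 */
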
int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
                                BlockDriverAmendStatusCB *status_cb,
                                void *cb_opaque, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2GetRefcountFunc *new_get_refcount;
    Qcow2SetRefcountFunc *new_set_refcount;
    void *new_refblock = qemu_blockalign(bs->file->bs, s->cluster_size);
    uint64_t *new_reftable = NULL, new_reftable_size = 0;
    uint64_t *old_reftable, old_reftable_size, old_reftable_offset;
    uint64_t new_reftable_index = 0;
    uint64_t i;
    int64_t new_reftable_offset = 0, allocated_reftable_size = 0;
    int new_refblock_size, new_refcount_bits = 1 << refcount_order;
    int old_refcount_order;
    int walk_index = 0;
    int ret;
    bool new_allocation;

    assert(s->qcow_version >= 3);
    assert(refcount_order >= 0 && refcount_order <= 6);

    /* see qcow2_open() */
    new_refblock_size = 1 << (s->cluster_bits - (refcount_order - 3));

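    /* Worked example for the line above (illustrative): with 64 KiB clusters
     * (cluster_bits = 16) and refcount_order = 4 (16-bit refcounts), this
     * yields 1 << (16 - 1) = 32768 refcount entries per refblock. */
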
    new_get_refcount = get_refcount_funcs[refcount_order];
    new_set_refcount = set_refcount_funcs[refcount_order];

    do {
        int total_walks;

        new_allocation = false;

        /* At a minimum, this walk and the final one which writes the
         * refblocks are required; also, this loop normally runs at least
         * twice: once to do the allocations, and once more to verify that
         * nothing new had to be allocated. That makes three walks in total */
        total_walks = MAX(walk_index + 2, 3);

        /* First, allocate the structures so they are present in the refcount
         * structures */
        ret = walk_over_reftable(bs, &new_reftable, &new_reftable_index,
                                 &new_reftable_size, NULL, new_refblock_size,
                                 new_refcount_bits, &alloc_refblock,
                                 &new_allocation, NULL, status_cb, cb_opaque,
                                 walk_index++, total_walks, errp);
        if (ret < 0) {
            goto done;
        }

        new_reftable_index = 0;

        if (new_allocation) {
            if (new_reftable_offset) {
                qcow2_free_clusters(bs, new_reftable_offset,
                                    allocated_reftable_size * sizeof(uint64_t),
                                    QCOW2_DISCARD_NEVER);
            }

            new_reftable_offset = qcow2_alloc_clusters(bs, new_reftable_size *
                                                           sizeof(uint64_t));
            if (new_reftable_offset < 0) {
                error_setg_errno(errp, -new_reftable_offset,
                                 "Failed to allocate the new reftable");
                ret = new_reftable_offset;
                goto done;
            }
            allocated_reftable_size = new_reftable_size;
        }
    } while (new_allocation);

    /* Second, write the new refblocks */
    ret = walk_over_reftable(bs, &new_reftable, &new_reftable_index,
                             &new_reftable_size, new_refblock,
                             new_refblock_size, new_refcount_bits,
                             &flush_refblock, &new_allocation, new_set_refcount,
                             status_cb, cb_opaque, walk_index, walk_index + 1,
                             errp);
    if (ret < 0) {
        goto done;
    }
    assert(!new_allocation);

    /* Write the new reftable */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_reftable_offset,
                                        new_reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Overlap check failed");
        goto done;
    }

    for (i = 0; i < new_reftable_size; i++) {
        cpu_to_be64s(&new_reftable[i]);
    }

    ret = bdrv_pwrite(bs->file, new_reftable_offset, new_reftable,
                      new_reftable_size * sizeof(uint64_t));

    for (i = 0; i < new_reftable_size; i++) {
        be64_to_cpus(&new_reftable[i]);
    }

    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to write the new reftable");
        goto done;
    }

    /* Write back the modified refcount blocks held in the refcount block
     * cache */
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to flush the refblock cache");
        goto done;
    }

    /* Update the image header to point to the new reftable; this only updates
     * the fields which are relevant to qcow2_update_header(); other fields
     * such as s->refcount_table or s->refcount_bits stay stale for now
     * (because we have to restore everything if qcow2_update_header() fails) */
    old_refcount_order = s->refcount_order;
    old_reftable_size = s->refcount_table_size;
    old_reftable_offset = s->refcount_table_offset;

    s->refcount_order = refcount_order;
    s->refcount_table_size = new_reftable_size;
    s->refcount_table_offset = new_reftable_offset;

    ret = qcow2_update_header(bs);
    if (ret < 0) {
        s->refcount_order = old_refcount_order;
        s->refcount_table_size = old_reftable_size;
        s->refcount_table_offset = old_reftable_offset;
        error_setg_errno(errp, -ret, "Failed to update the qcow2 header");
        goto done;
    }

    /* Now update the rest of the in-memory information */
    old_reftable = s->refcount_table;
    s->refcount_table = new_reftable;
    update_max_refcount_table_index(s);

    s->refcount_bits = 1 << refcount_order;
    s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
    s->refcount_max += s->refcount_max - 1;
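    /* The two-step computation above evaluates to 2^refcount_bits - 1 while
     * avoiding the shift by 64 (which would be undefined) for
     * refcount_bits = 64: e.g. 16-bit refcounts give 0xffff, 64-bit
     * refcounts give UINT64_MAX. */
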
    s->refcount_block_bits = s->cluster_bits - (refcount_order - 3);
    s->refcount_block_size = 1 << s->refcount_block_bits;

    s->get_refcount = new_get_refcount;
    s->set_refcount = new_set_refcount;

    /* For cleaning up all old refblocks and the old reftable below the "done"
     * label */
    new_reftable = old_reftable;
    new_reftable_size = old_reftable_size;
    new_reftable_offset = old_reftable_offset;

done:
    if (new_reftable) {
        /* On success, new_reftable actually points to the old reftable (and
         * new_reftable_size is the old reftable's size); but that is just
         * fine */
        for (i = 0; i < new_reftable_size; i++) {
            uint64_t offset = new_reftable[i] & REFT_OFFSET_MASK;
            if (offset) {
                qcow2_free_clusters(bs, offset, s->cluster_size,
                                    QCOW2_DISCARD_OTHER);
            }
        }
        g_free(new_reftable);

        if (new_reftable_offset > 0) {
            qcow2_free_clusters(bs, new_reftable_offset,
                                new_reftable_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    qemu_vfree(new_refblock);
    return ret;
}