/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length, uint64_t addend,
                            bool decrease, enum qcow2_discard_type type);

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index);

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value);

static Qcow2GetRefcountFunc *const get_refcount_funcs[] = {
    &get_refcount_ro0,
    &get_refcount_ro1,
    &get_refcount_ro2,
    &get_refcount_ro3,
    &get_refcount_ro4,
    &get_refcount_ro5,
    &get_refcount_ro6
};

static Qcow2SetRefcountFunc *const set_refcount_funcs[] = {
    &set_refcount_ro0,
    &set_refcount_ro1,
    &set_refcount_ro2,
    &set_refcount_ro3,
    &set_refcount_ro4,
    &set_refcount_ro5,
    &set_refcount_ro6
};
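
/*
 * A refcount entry is 1 << refcount_order bits wide, so orders 0..6 map to
 * 1-, 2-, 4-, 8-, 16-, 32- and 64-bit refcounts. qcow2_refcount_init() below
 * selects the accessor pair for the image's order once; the traditional qcow2
 * refcount width is 16 bits (refcount_order = 4), i.e. get/set_refcount_ro4().
 */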

/*********************************************************/
/* refcount handling */

int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_order >= 0 && s->refcount_order <= 6);
    s->get_refcount = get_refcount_funcs[s->refcount_order];
    s->set_refcount = set_refcount_funcs[s->refcount_order];

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret < 0) {
            goto fail;
        }
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
 fail:
    return ret;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->refcount_table);
}

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 8] >> (index % 8)) & 0x1;
}

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 1));
    ((uint8_t *)refcount_array)[index / 8] &= ~(0x1 << (index % 8));
    ((uint8_t *)refcount_array)[index / 8] |= value << (index % 8);
}

static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 4] >> (2 * (index % 4)))
           & 0x3;
}

static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 2));
    ((uint8_t *)refcount_array)[index / 4] &= ~(0x3 << (2 * (index % 4)));
    ((uint8_t *)refcount_array)[index / 4] |= value << (2 * (index % 4));
}

static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 2] >> (4 * (index % 2)))
           & 0xf;
}

static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 4));
    ((uint8_t *)refcount_array)[index / 2] &= ~(0xf << (4 * (index % 2)));
    ((uint8_t *)refcount_array)[index / 2] |= value << (4 * (index % 2));
}

static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index)
{
    return ((const uint8_t *)refcount_array)[index];
}

static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 8));
    ((uint8_t *)refcount_array)[index] = value;
}

static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index)
{
    return be16_to_cpu(((const uint16_t *)refcount_array)[index]);
}

static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 16));
    ((uint16_t *)refcount_array)[index] = cpu_to_be16(value);
}

static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index)
{
    return be32_to_cpu(((const uint32_t *)refcount_array)[index]);
}

static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 32));
    ((uint32_t *)refcount_array)[index] = cpu_to_be32(value);
}

static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index)
{
    return be64_to_cpu(((const uint64_t *)refcount_array)[index]);
}

static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    ((uint64_t *)refcount_array)[index] = cpu_to_be64(value);
}
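
/*
 * Worked example for the sub-byte accessors above: with refcount_order = 1
 * (2-bit entries), entry 5 lives in byte 5 / 4 = 1 at bit offset
 * 2 * (5 % 4) = 2; set_refcount_ro1() clears that field with ~(0x3 << 2) and
 * ORs in the new value, with the assertion guaranteeing it fits. The accessors
 * for byte-sized and wider entries (ro3..ro6) index the array directly and
 * only need big-endian conversion.
 */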

static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          refcount_block);

    return ret;
}

/*
 * Retrieves the refcount of the cluster given by its index and stores it in
 * *refcount. Returns 0 on success and -errno on failure.
 */
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
                       uint64_t *refcount)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    void *refcount_block;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        *refcount = 0;
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        *refcount = 0;
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    *refcount = s->get_refcount(refcount_block, block_index);

    qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);

    return 0;
}

/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
static unsigned int next_refcount_table_size(BDRVQcowState *s,
                                             unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}
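
/*
 * Worked example, assuming 64 kB clusters (cluster_bits = 16) and a reftable
 * currently occupying one cluster: a cluster holds 2^(16 - 3) = 8192 entries,
 * and the size grows by roughly 1.5x per step (1, 2, 3, 5, 8, 12, ...
 * clusters), so min_size = 20000 entries yields 3 clusters = 24576 entries.
 */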

/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
                                  uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}

/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
                                int64_t cluster_index, void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       refcount_block);
        }
    }

    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
        " at %" PRIx64 "\n",
        refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            (s->refcount_block_size - 1);
        s->set_refcount(*refcount_block, block_index, 1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1, false,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);

    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far; this will be the
     * basis for calculating the index of the first cluster used for the
     * self-describing refcount structures which we are about to create.
     *
     * Because we reached this point, there cannot be any refcount entries for
     * cluster_index or higher indices yet. However, because new_block has been
     * allocated to describe that cluster (and it will assume this role later
     * on), we cannot use that index; also, new_block may actually have a higher
     * cluster index than cluster_index, so it needs to be taken into account
     * here (and 1 needs to be added to its value because that cluster is used).
     */
    uint64_t blocks_used = DIV_ROUND_UP(MAX(cluster_index + 1,
                                            (new_block >> s->cluster_bits) + 1),
                                        s->refcount_block_size);

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            ((table_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            ((meta_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size));

    } while (last_table_size != table_size);

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
            s->refcount_table_size, table_size);
#endif

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    void *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {
        ret = -ENOMEM;
        goto fail_table;
    }

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
        s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        s->set_refcount(new_blocks, block++, 1);
    }

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
        blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    new_blocks = NULL;
    if (ret < 0) {
        goto fail_table;
    }

    /* Write refcount table to disk */
    for(i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
        table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for(i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    uint8_t data[12];
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),
        data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, refcount_block);
    if (ret < 0) {
        return ret;
    }

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;

fail_table:
    g_free(new_blocks);
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
    }
    return ret;
}
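
/*
 * Note the -EAGAIN contract above: alloc_refcount_block() may have taken
 * clusters for new refcount metadata that overlap the range its caller was
 * about to reference, so the caller has to restart its search for free
 * clusters. update_refcount() propagates the -EAGAIN, and allocators such as
 * qcow2_alloc_clusters() retry their allocate-then-reference loop on it.
 */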

void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_discard(bs->file,
                         d->offset >> BDRV_SECTOR_BITS,
                         d->bytes >> BDRV_SECTOR_BITS);
        }

        g_free(d);
    }
}

static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
               || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
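
/*
 * Example of the merging above, assuming 64 kB clusters: discarding the
 * clusters at 0x10000 and 0x30000 queues two separate regions. A later
 * discard at 0x20000 first extends one region in the QTAILQ_FOREACH loop,
 * and the cleanup loop after "found" then merges the now-adjacent neighbour,
 * leaving a single region [0x10000, 0x40000) for one bdrv_discard() call.
 */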

/* XXX: cache several refcount block clusters ? */
/* @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                                                   int64_t offset,
                                                   int64_t length,
                                                   uint64_t addend,
                                                   bool decrease,
                                                   enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    void *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64
            " addend=%s%" PRIu64 "\n", offset, length, decrease ? "-" : "",
            addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (decrease) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
            s->l2_table_cache);
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index;
        uint64_t refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
            }
            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
                                     refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);

        refcount = s->get_refcount(refcount_block, block_index);
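        /* Reject updates that would wrap: when decreasing, an unsigned
         * underflow makes refcount - addend compare greater than refcount;
         * when increasing, the sum must neither wrap nor exceed
         * s->refcount_max (2^refcount_bits - 1, i.e. 65535 for the usual
         * 16-bit refcounts). */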
        if (decrease ? (refcount - addend > refcount)
                     : (refcount + addend < refcount ||
                        refcount + addend > s->refcount_max))
        {
            ret = -EINVAL;
            goto fail;
        }
        if (decrease) {
            refcount -= addend;
        } else {
            refcount += addend;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->set_refcount(refcount_block, block_index, refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);
        }
    }

    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, addend,
                                !decrease, QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}

/*
 * Increases or decreases the refcount of a given cluster.
 *
 * @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added.
 *
 * On success 0 is returned; on failure -errno is returned.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  uint64_t addend, bool decrease,
                                  enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          decrease, type);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*********************************************************/
/* cluster allocation functions */

/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t i, nb_clusters, refcount;
    int ret;

    /* We can't allocate clusters if they may still be queued for discard. */
    if (s->cache_discards) {
        qcow2_process_discards(bs, 0);
    }

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        ret = qcow2_get_refcount(bs, next_cluster_index, &refcount);

        if (ret < 0) {
            return ret;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in an int64_t */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
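
/*
 * alloc_clusters_noref() is a first-fit scan resuming at
 * s->free_cluster_index, which update_refcount() rewinds whenever a refcount
 * below the current index drops to zero. "noref" means the clusters are only
 * located, not yet referenced; callers immediately bump their refcounts via
 * update_refcount() and retry on -EAGAIN.
 */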

int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}

int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
                            int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_index, refcount;
    uint64_t i;
    int ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for(i = 0; i < nb_clusters; i++) {
            ret = qcow2_get_refcount(bs, cluster_index++, &refcount);
            if (ret < 0) {
                return ret;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1, false,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset;
    size_t free_in_cluster;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));

    offset = s->free_byte_offset;

    if (offset) {
        uint64_t refcount;
        ret = qcow2_get_refcount(bs, offset >> s->cluster_bits, &refcount);
        if (ret < 0) {
            return ret;
        }

        if (refcount == s->refcount_max) {
            offset = 0;
        }
    }

    free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
    do {
        if (!offset || free_in_cluster < size) {
            int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
            if (new_cluster < 0) {
                return new_cluster;
            }

            if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
                offset = new_cluster;
            }
        }

        assert(offset);
        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);
    if (ret < 0) {
        return ret;
    }

    /* The cluster refcount was incremented; refcount blocks must be flushed
     * before the caller's L2 table updates. */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);

    s->free_byte_offset = offset + size;
    if (!offset_into_cluster(s, s->free_byte_offset)) {
        s->free_byte_offset = 0;
    }

    return offset;
}
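
/*
 * Worked example for qcow2_alloc_bytes(), assuming 64 kB clusters: two
 * compressed allocations of 20 kB and 30 kB can share one host cluster. The
 * first takes a fresh cluster and leaves s->free_byte_offset 20 kB into it;
 * the second still fits (free_in_cluster is 44 kB), so the same cluster's
 * refcount is simply incremented once more instead of allocating a new one.
 */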

void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, 1, true, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}

/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO:
        if (l2_entry & L2E_OFFSET_MASK) {
            if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
                qcow2_signal_corruption(bs, false, -1, -1,
                                        "Cannot free unaligned cluster %#llx",
                                        l2_entry & L2E_OFFSET_MASK);
            } else {
                qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                    nb_clusters << s->cluster_bits, type);
            }
        }
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}

/*********************************************************/
/* snapshots and image creation */

/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, refcount;
    bool l1_allocated = false;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors;
    int ret;

    assert(addend >= -1 && addend <= 1);

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }

    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for(j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                old_offset = offset;
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((offset >> s->csize_shift) &
                                   s->csize_mask) + 1;
                    if (addend != 0) {
                        ret = update_refcount(bs,
                                (offset & s->cluster_offset_mask) & ~511,
                                nb_csectors * 512, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }
                    /* compressed clusters are never modified */
                    refcount = 2;
                    break;

                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO:
                    if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Data "
                                                "cluster offset %#llx "
                                                "unaligned (L2 offset: %#"
                                                PRIx64 ", L2 index: %#x)",
                                                offset & L2E_OFFSET_MASK,
                                                l2_offset, j);
                        ret = -EIO;
                        goto fail;
                    }

                    cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                    if (!cluster_index) {
                        /* unallocated */
                        refcount = 0;
                        break;
                    }
                    if (addend != 0) {
                        ret = qcow2_update_cluster_refcount(bs,
                                cluster_index, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }

                    ret = qcow2_get_refcount(bs, cluster_index, &refcount);
                    if (ret < 0) {
                        goto fail;
                    }
                    break;

                case QCOW2_CLUSTER_UNALLOCATED:
                    refcount = 0;
                    break;

                default:
                    abort();
                }

                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                }
                if (offset != old_offset) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                            s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache,
                                                 l2_table);
                }
            }

            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

            if (addend != 0) {
                ret = qcow2_update_cluster_refcount(bs, l2_offset >>
                                                        s->cluster_bits,
                                                    abs(addend), addend < 0,
                                                    QCOW2_DISCARD_SNAPSHOT);
                if (ret < 0) {
                    goto fail;
                }
            }
            ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                     &refcount);
            if (ret < 0) {
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }

    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_table, l1_size2);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated)
        g_free(l1_table);
    return ret;
}

/*********************************************************/
/* refcount checking functions */

static size_t refcount_array_byte_size(BDRVQcowState *s, uint64_t entries)
{
    /* This assertion holds because there is no way we can address more than
     * 2^(64 - 9) clusters at once (with cluster size 512 = 2^9, and because
     * offsets have to be representable in bytes); due to every cluster
     * corresponding to one refcount entry, we are well below that limit */
    assert(entries < (UINT64_C(1) << (64 - 9)));

    /* Thanks to the assertion this will not overflow, because
     * s->refcount_order < 7.
     * (note: x << s->refcount_order == x * s->refcount_bits) */
    return DIV_ROUND_UP(entries << s->refcount_order, 8);
}
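
/*
 * Worked example: for entries = 1000 and refcount_order = 4 (16-bit
 * refcounts), 1000 << 4 = 16000 bits, so DIV_ROUND_UP(16000, 8) = 2000 bytes;
 * with refcount_order = 0 the same 1000 entries fit in
 * DIV_ROUND_UP(1000, 8) = 125 bytes.
 */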

/*
 * Reallocates *array so that it can hold new_size entries. *size must contain
 * the current number of entries in *array. If the reallocation fails, *array
 * and *size will not be modified and -errno will be returned. If the
 * reallocation is successful, *array will be set to the new buffer, *size
 * will be set to new_size and 0 will be returned. The size of the reallocated
 * refcount array buffer will be aligned to a cluster boundary, and the newly
 * allocated area will be zeroed.
 */
static int realloc_refcount_array(BDRVQcowState *s, void **array,
                                  int64_t *size, int64_t new_size)
{
    size_t old_byte_size, new_byte_size;
    void *new_ptr;

    /* Round to clusters so the array can be directly written to disk */
    old_byte_size = size_to_clusters(s, refcount_array_byte_size(s, *size))
                    * s->cluster_size;
    new_byte_size = size_to_clusters(s, refcount_array_byte_size(s, new_size))
                    * s->cluster_size;

    if (new_byte_size == old_byte_size) {
        *size = new_size;
        return 0;
    }

    assert(new_byte_size > 0);

    new_ptr = g_try_realloc(*array, new_byte_size);
    if (!new_ptr) {
        return -ENOMEM;
    }

    if (new_byte_size > old_byte_size) {
        memset((void *)((uintptr_t)new_ptr + old_byte_size), 0,
               new_byte_size - old_byte_size);
    }

    *array = new_ptr;
    *size = new_size;

    return 0;
}

/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         void **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, last, cluster_offset, k, refcount;
    int ret;

    if (size <= 0) {
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            ret = realloc_refcount_array(s, refcount_table,
                                         refcount_table_size, k + 1);
            if (ret < 0) {
                res->check_errors++;
                return ret;
            }
        }

        refcount = s->get_refcount(*refcount_table, k);
        if (refcount == s->refcount_max) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            res->corruptions++;
            continue;
        }
        s->set_refcount(*refcount_table, k, refcount + 1);
    }

    return 0;
}

/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};

/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size, int64_t l2_offset,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        goto fail;
    }

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                    "copied flag must never be set for compressed "
                    "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);
            if (ret < 0) {
                goto fail;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {
                break;
            }
            /* fall through */

        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                    "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    g_free(l2_table);
    return ret;
}

/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);
    if (ret < 0) {
        goto fail;
    }

    /* Read L1 table entries from disk */
    if (l1_size2 > 0) {
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            ret = -ENOMEM;
            res->check_errors++;
            goto fail;
        }
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
            goto fail;
        }
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }

    /* Do the actual checks */
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    g_free(l1_table);
    return ret;
}

/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * have been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    uint64_t refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &refcount);
        if (ret < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" :
                                            "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                ret = qcow2_get_refcount(bs,
                                         data_offset >> s->cluster_bits,
                                         &refcount);
                if (ret < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" :
                                                    "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}

/*
 * Checks consistency of refblocks and accounts for each refblock in
 * *refcount_table.
 */
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix, bool *rebuild,
                           void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i, size;
    int ret;

    for(i = 0; i < s->refcount_table_size; i++) {
        uint64_t offset, cluster;
        offset = s->refcount_table[i];
        cluster = offset >> s->cluster_bits;

        /* Refcount blocks are cluster aligned */
        if (offset_into_cluster(s, offset)) {
            fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
                "cluster aligned; refcount table entry corrupted\n", i);
            res->corruptions++;
            *rebuild = true;
            continue;
        }

        if (cluster >= *nb_clusters) {
            fprintf(stderr, "%s refcount block %" PRId64 " is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);

            if (fix & BDRV_FIX_ERRORS) {
                int64_t new_nb_clusters;

                if (offset > INT64_MAX - s->cluster_size) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                ret = bdrv_truncate(bs->file, offset + s->cluster_size);
                if (ret < 0) {
                    goto resize_fail;
                }
                size = bdrv_getlength(bs->file);
                if (size < 0) {
                    ret = size;
                    goto resize_fail;
                }

                new_nb_clusters = size_to_clusters(s, size);
                assert(new_nb_clusters >= *nb_clusters);

                ret = realloc_refcount_array(s, refcount_table,
                                             nb_clusters, new_nb_clusters);
                if (ret < 0) {
                    res->check_errors++;
                    return ret;
                }

                if (cluster >= *nb_clusters) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                res->corruptions_fixed++;
                ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                    offset, s->cluster_size);
                if (ret < 0) {
                    return ret;
                }
                /* No need to check whether the refcount is now greater than 1:
                 * This area was just allocated and zeroed, so it can only be
                 * exactly 1 after inc_refcounts() */
                continue;

resize_fail:
                res->corruptions++;
                *rebuild = true;
                fprintf(stderr, "ERROR could not resize image: %s\n",
                        strerror(-ret));
            } else {
                res->corruptions++;
            }
            continue;
        }

        if (offset != 0) {
            ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                offset, s->cluster_size);
            if (ret < 0) {
                return ret;
            }
            if (s->get_refcount(*refcount_table, cluster) != 1) {
                fprintf(stderr, "ERROR refcount block %" PRId64
                        " refcount=%" PRIu64 "\n", i,
                        s->get_refcount(*refcount_table, cluster));
                res->corruptions++;
                *rebuild = true;
            }
        }
    }

    return 0;
}

/*
 * Calculates an in-memory refcount table.
 */
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        int64_t old_size = 0;
        ret = realloc_refcount_array(s, refcount_table,
                                     &old_size, *nb_clusters);
        if (ret < 0) {
            res->check_errors++;
            return ret;
        }
    }

    /* header */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            return ret;
        }
    }
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}

/*
 * Compares the actual reference count for each cluster in the image against
 * the refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              void *refcount_table, int64_t nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    uint64_t refcount1, refcount2;
    int ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        ret = qcow2_get_refcount(bs, i, &refcount1);
        if (ret < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-ret));
            res->check_errors++;
            continue;
        }

        refcount2 = s->get_refcount(refcount_table, i);

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%" PRIu64
                    " reference=%" PRIu64 "\n",
                    num_fixed != NULL ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                                            "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount_diff(refcount1, refcount2),
                                      refcount1 > refcount2,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}

/*
 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
 * the on-disk refcount structures.
 *
 * On input, *first_free_cluster tells where to start looking, and need not
 * actually be a free cluster; the returned offset will not be before that
 * cluster. On output, *first_free_cluster points to the first gap found, even
 * if that gap was too small to be used as the returned offset.
 *
 * Note that *first_free_cluster is a cluster index whereas the return value is
 * an offset.
 */
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   int cluster_count,
                                   void **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;
    int ret;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * continuously free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
         cluster++)
    {
        if (!s->get_refcount(*refcount_table, cluster)) {
            contiguous_free_clusters++;
            if (first_gap) {
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
                first_gap = false;
            }
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;
        }
    }

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of continuously free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly to append free clusters at the end of the image */
    if (contiguous_free_clusters < cluster_count) {
        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        ret = realloc_refcount_array(s, refcount_table, imrt_nb_clusters,
                                     cluster + cluster_count
                                     - contiguous_free_clusters);
        if (ret < 0) {
            return ret;
        }
    }

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        s->set_refcount(*refcount_table, cluster + i, 1);
    }

    return cluster << s->cluster_bits;
}
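
/*
 * Example: if clusters 0 and 2 are the only ones marked used in the IMRT,
 * alloc_clusters_imrt(bs, 3, ...) with *first_free_cluster = 0 records the
 * too-small gap at cluster 1 in *first_free_cluster, finds the run starting
 * at cluster 3, marks clusters 3..5 as used and returns 3 << cluster_bits.
 * If no run of three free clusters existed, realloc_refcount_array() would
 * grow the IMRT so the clusters can be appended at the image end.
 */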
1978 * Creates a new refcount structure based solely on the in-memory information
1979 * given through *refcount_table. All necessary allocations will be reflected
1980 * in that array.
1982 * On success, the old refcount structure is leaked (it will be covered by the
1983 * new refcount structure).
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      void **refcount_table,
                                      int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    void *on_disk_refblock;
    int ret = 0;
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;
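    /*
     * The two members above mirror the adjacent refcount_table_offset and
     * refcount_table_clusters fields of QCowHeader, so the single
     * bdrv_pwrite_sync() further down can update both header fields in one
     * request; QEMU_PACKED keeps sizeof() at 12 bytes instead of letting the
     * compiler pad the struct to 16.
     */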
    qcow2_cache_empty(bs, s->refcount_block_cache);

write_refblocks:
    for (; cluster < *nb_clusters; cluster++) {
        if (!s->get_refcount(*refcount_table, cluster)) {
            continue;
        }

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;
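        /*
         * Worked example with hypothetical defaults (16-bit refcounts and
         * 64 KiB clusters, i.e. refcount_block_bits == 15): for cluster
         * 100000, refblock_index is 100000 >> 15 = 3 and refblock_start is
         * 3 << 15 = 98304, the first of the 32768 clusters covered by
         * refblock number 3.
         */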
        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        }
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters, &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;
            goto fail;
        }
        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;

            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                 reftable_size *
                                                 sizeof(uint64_t));
            if (!new_on_disk_reftable) {
                res->check_errors++;
                ret = -ENOMEM;
                goto fail;
            }
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;
        }
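        /*
         * The ROUND_UP above grows the reftable in whole clusters. Worked
         * example with a hypothetical 64 KiB cluster size: for
         * refblock_index == 3, (3 + 1) * 8 = 32 bytes round up to 65536
         * bytes, i.e. reftable_size becomes 8192 entries even though only
         * four are in use so far.
         */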
        on_disk_reftable[refblock_index] = refblock_offset;

        /* If this is apparently the last refblock (for now), try to squeeze the
         * reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
        {
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
                                                          sizeof(uint64_t));
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;
                goto fail;
            }
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* The size of *refcount_table is always cluster-aligned, therefore the
         * write operation will not overflow */
        on_disk_refblock = (void *)((char *) *refcount_table +
                                    refblock_index * s->cluster_size);

        ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
                         on_disk_refblock, s->cluster_sectors);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }
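        /*
         * Worked example for the sector math above, assuming 64 KiB clusters:
         * a refblock allocated at byte offset 0x50000 is written starting at
         * sector 0x50000 / 512 = 640 and spans cluster_sectors ==
         * 65536 / 512 = 128 sectors.
         */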
        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;
    }

    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        }
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        goto write_refblocks;
    }
    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    /* Enter new reftable into the image header */
    cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
                 reftable_offset);
    cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
                 size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader,
                                              refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
    if (ret < 0) {
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
        goto fail;
    }

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}
/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    void *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), compare_refcounts() is called again further down, in which
     * case this first result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);

    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,
                                         &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        res->corruptions = 0;
        res->leaks = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        rebuild = false;
        memset(refcount_table, 0, refcount_array_byte_size(s, nb_clusters));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
                                  &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
            if (rebuild) {
                fprintf(stderr, "ERROR rebuilt refcount structure is still "
                        "broken\n");
            }

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;
            *res = saved_res;
        }

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        }
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        }
        res->leaks += fresh_leaks;
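        /*
         * Bookkeeping example with hypothetical counts: if the first pass
         * found old_res.corruptions == 3 and the recount after the rebuild
         * finds res->corruptions == 0, corruptions_fixed grows by 3; any
         * remaining fresh_leaks were introduced by the rebuild itself and
         * could not be fixed above.
         */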
    } else if (fix) {
        if (rebuild) {
            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
            res->check_errors++;
            ret = -EIO;
            goto fail;
        }

        if (res->leaks || res->corruptions) {
            *res = pre_compare_res;
            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                              refcount_table, nb_clusters);
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}
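/*
 * Illustrative caller sketch (hypothetical, mirroring how qemu-img check
 * reaches this function through the bdrv_check() entry point):
 *
 *     BdrvCheckResult result = {0};
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
 *     if (ret == 0 && result.corruptions == 0 && result.leaks == 0) {
 *         // image is consistent, or was successfully repaired
 *     }
 */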
/* Helper for qcow2_check_metadata_overlap(); note that it implicitly uses the
 * offset and size variables of the enclosing function */
#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)
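/*
 * Worked example of the ranges_overlap() semantics with hypothetical values:
 * a write covering bytes [0x10000, 0x12000) overlaps an L1 table at offset
 * 0x11000 of size 0x1000, so overlaps_with(0x11000, 0x1000) is true; a table
 * starting at 0x12000 would not overlap, since range ends are exclusive.
 */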
/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (being a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);
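    /*
     * Worked example of the alignment above, assuming 64 KiB clusters: a
     * request for offset 0x12345 and size 0x100 becomes offset 0x10000 and
     * size align_offset(0x2345 + 0x100, 0x10000) = 0x10000, i.e. the whole
     * containing cluster is tested.
     */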
    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                          s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                              s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        for (i = 0; i < s->refcount_table_size; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz  = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
            uint64_t *l1 = g_try_malloc(l1_sz2);
            int ret;

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}
/* Human-readable names for the metadata sections, indexed by the
 * QCOW2_OL_*_BITNR bit numbers */
static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
};
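/*
 * Example of how this table is consumed (each QCOW2_OL_* flag value is
 * 1 << its corresponding *_BITNR constant): if qcow2_check_metadata_overlap()
 * returns QCOW2_OL_ACTIVE_L2, the ctz32() in qcow2_pre_write_overlap_check()
 * below recovers QCOW2_OL_ACTIVE_L2_BITNR, which indexes "active L2 table"
 * here.
 */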
/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ctz32(ret);
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}
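/*
 * Typical use, as in rebuild_refcount_structure() above: validate the target
 * range before writing any metadata.
 *
 *     ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
 *                                         s->cluster_size);
 *     if (ret < 0) {
 *         return ret;   // overlap or error; corruption already signalled
 *     }
 *     // safe to write s->cluster_size bytes at refblock_offset
 */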