/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length, uint64_t addend,
                            bool decrease, enum qcow2_discard_type type);

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index);

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value);

static Qcow2GetRefcountFunc *const get_refcount_funcs[] = {
    &get_refcount_ro0,
    &get_refcount_ro1,
    &get_refcount_ro2,
    &get_refcount_ro3,
    &get_refcount_ro4,
    &get_refcount_ro5,
    &get_refcount_ro6
};

static Qcow2SetRefcountFunc *const set_refcount_funcs[] = {
    &set_refcount_ro0,
    &set_refcount_ro1,
    &set_refcount_ro2,
    &set_refcount_ro3,
    &set_refcount_ro4,
    &set_refcount_ro5,
    &set_refcount_ro6
};
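
/*
 * Illustrative note: s->refcount_order selects the refcount entry width as
 * refcount_bits = 1 << refcount_order, and thereby which accessor pair from
 * the tables above is used. Order 4 (16-bit refcounts, the qcow2 default and
 * the only width allowed by version 2 images) selects get/set_refcount_ro4;
 * order 0 packs eight 1-bit refcounts into each byte.
 */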

/*********************************************************/
/* refcount handling */

int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_order >= 0 && s->refcount_order <= 6);
    s->get_refcount = get_refcount_funcs[s->refcount_order];
    s->set_refcount = set_refcount_funcs[s->refcount_order];

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->refcount_table_size; i++) {
            be64_to_cpus(&s->refcount_table[i]);
        }
    }
    return 0;
 fail:
    return ret;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->refcount_table);
}

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 8] >> (index % 8)) & 0x1;
}

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 1));
    ((uint8_t *)refcount_array)[index / 8] &= ~(0x1 << (index % 8));
    ((uint8_t *)refcount_array)[index / 8] |= value << (index % 8);
}

static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 4] >> (2 * (index % 4)))
           & 0x3;
}

static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 2));
    ((uint8_t *)refcount_array)[index / 4] &= ~(0x3 << (2 * (index % 4)));
    ((uint8_t *)refcount_array)[index / 4] |= value << (2 * (index % 4));
}

static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 2] >> (4 * (index % 2)))
           & 0xf;
}

static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 4));
    ((uint8_t *)refcount_array)[index / 2] &= ~(0xf << (4 * (index % 2)));
    ((uint8_t *)refcount_array)[index / 2] |= value << (4 * (index % 2));
}
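
/*
 * Worked example for the sub-byte accessors above: with refcount_order = 1
 * (2-bit refcounts), entry index 5 lives in byte 5 / 4 = 1 at bit offset
 * 2 * (5 % 4) = 2. Setting it to 3 first clears bits 2-3 with the mask
 * ~(0x3 << 2) and then ORs in 3 << 2 = 0xc, leaving the other three
 * refcounts stored in that byte untouched.
 */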

static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index)
{
    return ((const uint8_t *)refcount_array)[index];
}

static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 8));
    ((uint8_t *)refcount_array)[index] = value;
}

static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index)
{
    return be16_to_cpu(((const uint16_t *)refcount_array)[index]);
}

static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 16));
    ((uint16_t *)refcount_array)[index] = cpu_to_be16(value);
}

static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index)
{
    return be32_to_cpu(((const uint32_t *)refcount_array)[index]);
}

static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 32));
    ((uint32_t *)refcount_array)[index] = cpu_to_be32(value);
}

static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index)
{
    return be64_to_cpu(((const uint64_t *)refcount_array)[index]);
}

static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    ((uint64_t *)refcount_array)[index] = cpu_to_be64(value);
}

static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          refcount_block);

    return ret;
}

/*
 * Retrieves the refcount of the cluster given by its index and stores it in
 * *refcount. Returns 0 on success and -errno on failure.
 */
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
                       uint64_t *refcount)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    void *refcount_block;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        *refcount = 0;
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        *refcount = 0;
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    *refcount = s->get_refcount(refcount_block, block_index);

    qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);

    return 0;
}
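
/*
 * Illustrative example of the index split above, assuming the common 64 KiB
 * clusters with 16-bit refcounts: refcount_block_size = 32768 entries, so
 * refcount_block_bits = 15. Cluster index 0x12345 then splits into reftable
 * index 0x12345 >> 15 = 2 and block index 0x12345 & 0x7fff = 0x2345.
 */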

/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
static unsigned int next_refcount_table_size(BDRVQcowState *s,
                                             unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}
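
/*
 * Illustrative note: the table thus grows geometrically by a factor of
 * roughly 1.5 in clusters, following (n * 3 + 1) / 2:
 * 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 -> ...
 */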

/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
                                  uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}
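
/*
 * Illustrative note: assuming 64 KiB clusters and 16-bit refcounts, one
 * refblock describes 32768 clusters, i.e. 2 GiB of image file. The shift by
 * cluster_bits + refcount_block_bits = 16 + 15 = 31 therefore means two
 * offsets share a refblock iff they fall into the same 2 GiB-aligned window.
 */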

/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
                                int64_t cluster_index, void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       refcount_block);
        }
    }

    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
            " at %" PRIx64 "\n",
            refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
                          (s->refcount_block_size - 1);
        s->set_refcount(*refcount_block, block_index, 1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1, false,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);

    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far; this will be the
     * basis for calculating the index of the first cluster used for the
     * self-describing refcount structures which we are about to create.
     *
     * Because we reached this point, there cannot be any refcount entries for
     * cluster_index or higher indices yet. However, because new_block has been
     * allocated to describe that cluster (and it will assume this role later
     * on), we cannot use that index; also, new_block may actually have a higher
     * cluster index than cluster_index, so it needs to be taken into account
     * here (and 1 needs to be added to its value because that cluster is used).
     */
    uint64_t blocks_used = DIV_ROUND_UP(MAX(cluster_index + 1,
                                            (new_block >> s->cluster_bits) + 1),
                                        s->refcount_block_size);

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            ((table_clusters + s->refcount_block_size - 1)
             / s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            ((meta_clusters + s->refcount_block_size - 1)
             / s->refcount_block_size));

    } while (last_table_size != table_size);

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
            s->refcount_table_size, table_size);
#endif

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    void *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {
        ret = -ENOMEM;
        goto fail_table;
    }

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        s->set_refcount(new_blocks, block++, 1);
    }

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
                           blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    new_blocks = NULL;
    if (ret < 0) {
        goto fail_table;
    }

    /* Write refcount table to disk */
    for (i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
                           table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for (i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    uint8_t data[12];
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, refcount_block);
    if (ret < 0) {
        return ret;
    }

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;

fail_table:
    g_free(new_blocks);
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
    }
    return ret;
}

void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_discard(bs->file,
                         d->offset >> BDRV_SECTOR_BITS,
                         d->bytes >> BDRV_SECTOR_BITS);
        }

        g_free(d);
    }
}

static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
               || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
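
/*
 * Illustrative example: with pending regions [0x10000, 0x20000) and
 * [0x30000, 0x40000), a new discard of [0x20000, 0x30000) first extends the
 * first region to [0x10000, 0x30000), and the merge loop then absorbs the
 * second one, leaving a single region [0x10000, 0x40000).
 */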

/* XXX: cache several refcount block clusters ? */
/* @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                                                   int64_t offset,
                                                   int64_t length,
                                                   uint64_t addend,
                                                   bool decrease,
                                                   enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    void *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64
            " addend=%s%" PRIu64 "\n", offset, length, decrease ? "-" : "",
            addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (decrease) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
                                   s->l2_table_cache);
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size)
    {
        int block_index;
        uint64_t refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
            }
            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
                                     refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);

        refcount = s->get_refcount(refcount_block, block_index);
        if (decrease ? (refcount - addend > refcount)
                     : (refcount + addend < refcount ||
                        refcount + addend > s->refcount_max))
        {
            ret = -EINVAL;
            goto fail;
        }
        if (decrease) {
            refcount -= addend;
        } else {
            refcount += addend;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->set_refcount(refcount_block, block_index, refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);
        }
    }

    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, addend,
                                !decrease, QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}

/*
 * Increases or decreases the refcount of a given cluster.
 *
 * @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added.
 *
 * On success 0 is returned; on failure -errno is returned.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  uint64_t addend, bool decrease,
                                  enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          decrease, type);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*********************************************************/
/* cluster allocation functions */

/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t i, nb_clusters, refcount;
    int ret;

    /* We can't allocate clusters if they may still be queued for discard. */
    if (s->cache_discards) {
        qcow2_process_discards(bs, 0);
    }

    nb_clusters = size_to_clusters(s, size);
retry:
    for (i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        ret = qcow2_get_refcount(bs, next_cluster_index, &refcount);

        if (ret < 0) {
            return ret;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in an int64_t */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
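
/*
 * Illustrative note: the scan above is a simple next-fit search starting at
 * free_cluster_index. For example, if free_cluster_index points at cluster
 * 10 and clusters 10 and 11 are free but cluster 12 is in use, a request for
 * three clusters hits the "goto retry" and restarts the scan at cluster 13;
 * a run is only accepted once nb_clusters consecutive refcounts of zero have
 * been seen.
 */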

int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}

int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
                            int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_index, refcount;
    uint64_t i;
    int ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for (i = 0; i < nb_clusters; i++) {
            ret = qcow2_get_refcount(bs, cluster_index++, &refcount);
            if (ret < 0) {
                return ret;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1, false,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset;
    size_t free_in_cluster;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));

    offset = s->free_byte_offset;

    if (offset) {
        uint64_t refcount;
        ret = qcow2_get_refcount(bs, offset >> s->cluster_bits, &refcount);
        if (ret < 0) {
            return ret;
        }

        if (refcount == s->refcount_max) {
            offset = 0;
        }
    }

    free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
    if (!offset || free_in_cluster < size) {
        int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
        if (new_cluster < 0) {
            return new_cluster;
        }

        if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
            offset = new_cluster;
        }
    }

    assert(offset);
    ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
    if (ret < 0) {
        return ret;
    }

    /* The cluster refcount was incremented; refcount blocks must be flushed
     * before the caller's L2 table updates. */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);

    s->free_byte_offset = offset + size;
    if (!offset_into_cluster(s, s->free_byte_offset)) {
        s->free_byte_offset = 0;
    }

    return offset;
}

void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, 1, true, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}

/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO:
        if (l2_entry & L2E_OFFSET_MASK) {
            if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
                qcow2_signal_corruption(bs, false, -1, -1,
                                        "Cannot free unaligned cluster %#llx",
                                        l2_entry & L2E_OFFSET_MASK);
            } else {
                qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                    nb_clusters << s->cluster_bits, type);
            }
        }
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}

/*********************************************************/
/* snapshots and image creation */

/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, refcount;
    bool l1_allocated = false;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors;
    int ret;

    assert(addend >= -1 && addend <= 1);

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }

    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                old_offset = offset;
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((offset >> s->csize_shift) &
                                   s->csize_mask) + 1;
                    if (addend != 0) {
                        ret = update_refcount(bs,
                                (offset & s->cluster_offset_mask) & ~511,
                                nb_csectors * 512, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }
                    /* compressed clusters are never modified */
                    refcount = 2;
                    break;

                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO:
                    if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Data "
                                                "cluster offset %#llx "
                                                "unaligned (L2 offset: %#"
                                                PRIx64 ", L2 index: %#x)",
                                                offset & L2E_OFFSET_MASK,
                                                l2_offset, j);
                        ret = -EIO;
                        goto fail;
                    }

                    cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                    if (!cluster_index) {
                        /* unallocated */
                        refcount = 0;
                        break;
                    }
                    if (addend != 0) {
                        ret = qcow2_update_cluster_refcount(bs,
                                cluster_index, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }

                    ret = qcow2_get_refcount(bs, cluster_index, &refcount);
                    if (ret < 0) {
                        goto fail;
                    }
                    break;

                case QCOW2_CLUSTER_UNALLOCATED:
                    refcount = 0;
                    break;

                default:
                    abort();
                }

                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                }
                if (offset != old_offset) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                                   s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache,
                                                 l2_table);
                }
            }

            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

            if (addend != 0) {
                ret = qcow2_update_cluster_refcount(bs, l2_offset >>
                                                        s->cluster_bits,
                                                    abs(addend), addend < 0,
                                                    QCOW2_DISCARD_SNAPSHOT);
                if (ret < 0) {
                    goto fail;
                }
            }
            ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                     &refcount);
            if (ret < 0) {
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }

    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_table, l1_size2);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated) {
        g_free(l1_table);
    }
    return ret;
}

/*********************************************************/
/* refcount checking functions */

static size_t refcount_array_byte_size(BDRVQcowState *s, uint64_t entries)
{
    /* This assertion holds because there is no way we can address more than
     * 2^(64 - 9) clusters at once (with cluster size 512 = 2^9, and because
     * offsets have to be representable in bytes); due to every cluster
     * corresponding to one refcount entry, we are well below that limit */
    assert(entries < (UINT64_C(1) << (64 - 9)));

    /* Thanks to the assertion this will not overflow, because
     * s->refcount_order < 7.
     * (note: x << s->refcount_order == x * s->refcount_bits) */
    return DIV_ROUND_UP(entries << s->refcount_order, 8);
}
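
/*
 * Worked example: for entries = 1000 and refcount_order = 4 (16-bit
 * refcounts), this yields DIV_ROUND_UP(1000 << 4, 8) = DIV_ROUND_UP(16000, 8)
 * = 2000 bytes, i.e. two bytes per entry as expected.
 */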

/**
 * Reallocates *array so that it can hold new_size entries. *size must contain
 * the current number of entries in *array. If the reallocation fails, *array
 * and *size will not be modified and -errno will be returned. If the
 * reallocation is successful, *array will be set to the new buffer, *size
 * will be set to new_size and 0 will be returned. The size of the reallocated
 * refcount array buffer will be aligned to a cluster boundary, and the newly
 * allocated area will be zeroed.
 */
static int realloc_refcount_array(BDRVQcowState *s, void **array,
                                  int64_t *size, int64_t new_size)
{
    size_t old_byte_size, new_byte_size;
    void *new_ptr;

    /* Round to clusters so the array can be directly written to disk */
    old_byte_size = size_to_clusters(s, refcount_array_byte_size(s, *size))
                    * s->cluster_size;
    new_byte_size = size_to_clusters(s, refcount_array_byte_size(s, new_size))
                    * s->cluster_size;

    if (new_byte_size == old_byte_size) {
        *size = new_size;
        return 0;
    }

    assert(new_byte_size > 0);

    new_ptr = g_try_realloc(*array, new_byte_size);
    if (!new_ptr) {
        return -ENOMEM;
    }

    if (new_byte_size > old_byte_size) {
        memset((void *)((uintptr_t)new_ptr + old_byte_size), 0,
               new_byte_size - old_byte_size);
    }

    *array = new_ptr;
    *size = new_size;

    return 0;
}

/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         void **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, last, cluster_offset, k, refcount;
    int ret;

    if (size <= 0) {
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            ret = realloc_refcount_array(s, refcount_table,
                                         refcount_table_size, k + 1);
            if (ret < 0) {
                res->check_errors++;
                return ret;
            }
        }

        refcount = s->get_refcount(*refcount_table, k);
        if (refcount == s->refcount_max) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            res->corruptions++;
            continue;
        }
        s->set_refcount(*refcount_table, k, refcount + 1);
    }

    return 0;
}

/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};

/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size, int64_t l2_offset,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        goto fail;
    }

    /* Do the actual checks */
    for (i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);
            if (ret < 0) {
                goto fail;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {
                break;
            }
            /* fall through */

        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    g_free(l2_table);
    return ret;
}

/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);
    if (ret < 0) {
        goto fail;
    }

    /* Read L1 table entries from disk */
    if (l1_size2 > 0) {
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            ret = -ENOMEM;
            res->check_errors++;
            goto fail;
        }
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
            goto fail;
        }
        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }

    /* Do the actual checks */
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                        "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    g_free(l1_table);
    return ret;
}

/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * have been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    uint64_t refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &refcount);
        if (ret < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" :
                                            "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                ret = qcow2_get_refcount(bs,
                                         data_offset >> s->cluster_bits,
                                         &refcount);
                if (ret < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" :
                                                    "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}

/*
 * Checks consistency of refblocks and accounts for each refblock in
 * *refcount_table.
 */
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix, bool *rebuild,
                           void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i, size;
    int ret;

    for (i = 0; i < s->refcount_table_size; i++) {
        uint64_t offset, cluster;
        offset = s->refcount_table[i];
        cluster = offset >> s->cluster_bits;

        /* Refcount blocks are cluster aligned */
        if (offset_into_cluster(s, offset)) {
            fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
                    "cluster aligned; refcount table entry corrupted\n", i);
            res->corruptions++;
            *rebuild = true;
            continue;
        }

        if (cluster >= *nb_clusters) {
            fprintf(stderr, "%s refcount block %" PRId64 " is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);

            if (fix & BDRV_FIX_ERRORS) {
                int64_t new_nb_clusters;

                if (offset > INT64_MAX - s->cluster_size) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                ret = bdrv_truncate(bs->file, offset + s->cluster_size);
                if (ret < 0) {
                    goto resize_fail;
                }
                size = bdrv_getlength(bs->file);
                if (size < 0) {
                    ret = size;
                    goto resize_fail;
                }

                new_nb_clusters = size_to_clusters(s, size);
                assert(new_nb_clusters >= *nb_clusters);

                ret = realloc_refcount_array(s, refcount_table,
                                             nb_clusters, new_nb_clusters);
                if (ret < 0) {
                    res->check_errors++;
                    return ret;
                }

                if (cluster >= *nb_clusters) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                res->corruptions_fixed++;
                ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                    offset, s->cluster_size);
                if (ret < 0) {
                    return ret;
                }
                /* No need to check whether the refcount is now greater than 1:
                 * This area was just allocated and zeroed, so it can only be
                 * exactly 1 after inc_refcounts() */
                continue;

resize_fail:
                res->corruptions++;
                *rebuild = true;
                fprintf(stderr, "ERROR could not resize image: %s\n",
                        strerror(-ret));
            } else {
                res->corruptions++;
            }
            continue;
        }

        if (offset != 0) {
            ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                offset, s->cluster_size);
            if (ret < 0) {
                return ret;
            }
            if (s->get_refcount(*refcount_table, cluster) != 1) {
                fprintf(stderr, "ERROR refcount block %" PRId64
                        " refcount=%" PRIu64 "\n", i,
                        s->get_refcount(*refcount_table, cluster));
                res->corruptions++;
                *rebuild = true;
            }
        }
    }

    return 0;
}

/*
 * Calculates an in-memory refcount table.
 */
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        int64_t old_size = 0;
        ret = realloc_refcount_array(s, refcount_table,
                                     &old_size, *nb_clusters);
        if (ret < 0) {
            res->check_errors++;
            return ret;
        }
    }

    /* header */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            return ret;
        }
    }
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}

/*
 * Compares the actual reference count for each cluster in the image against
 * the refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              void *refcount_table, int64_t nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    uint64_t refcount1, refcount2;
    int ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        ret = qcow2_get_refcount(bs, i, &refcount1);
        if (ret < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-ret));
            res->check_errors++;
            continue;
        }

        refcount2 = s->get_refcount(refcount_table, i);

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%" PRIu64
                    " reference=%" PRIu64 "\n",
                    num_fixed != NULL     ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                                            "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount_diff(refcount1, refcount2),
                                      refcount1 > refcount2,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}

/*
 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
 * the on-disk refcount structures.
 *
 * On input, *first_free_cluster tells where to start looking, and need not
 * actually be a free cluster; the returned offset will not be before that
 * cluster. On output, *first_free_cluster points to the first gap found, even
 * if that gap was too small to be used as the returned offset.
 *
 * Note that *first_free_cluster is a cluster index whereas the return value is
 * an offset.
 */
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   int cluster_count,
                                   void **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;
    int ret;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * continuously free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
         cluster++)
    {
        if (!s->get_refcount(*refcount_table, cluster)) {
            contiguous_free_clusters++;
            if (first_gap) {
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
                first_gap = false;
            }
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;
        }
    }

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of continuously free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly to append free clusters at the end of the image */
    if (contiguous_free_clusters < cluster_count) {
        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        ret = realloc_refcount_array(s, refcount_table, imrt_nb_clusters,
                                     cluster + cluster_count
                                     - contiguous_free_clusters);
        if (ret < 0) {
            return ret;
        }
    }

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        s->set_refcount(*refcount_table, cluster + i, 1);
    }

    return cluster << s->cluster_bits;
}
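
/*
 * Illustrative example: with IMRT refcounts [1, 0, 0, 1, 0, 0, 0, ...] and
 * *first_free_cluster = 0, a request for three clusters skips the gap at
 * clusters 1-2 (too small, though *first_free_cluster is still updated to 1)
 * and returns the offset of cluster 4, marking clusters 4-6 as used.
 */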
1976 * Creates a new refcount structure based solely on the in-memory information
1977 * given through *refcount_table. All necessary allocations will be reflected
1978 * in that array.
1980 * On success, the old refcount structure is leaked (it will be covered by the
1981 * new refcount structure).
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      void **refcount_table,
                                      int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    void *on_disk_refblock;
    int ret = 0;
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);
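
    /* The loop below walks the IMRT and writes a refblock to disk for every
     * range of used clusters it covers; the refblocks (and, if it fits, the
     * reftable) are themselves allocated through the IMRT. If the reftable
     * has to be (re)allocated after the walk, the code jumps back to this
     * label so that refblocks covering the reftable's new clusters are
     * written out as well. */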
write_refblocks:
    for (; cluster < *nb_clusters; cluster++) {
        if (!s->get_refcount(*refcount_table, cluster)) {
            continue;
        }

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;

        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        }
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters, &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;
            goto fail;
        }

        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;

            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                 reftable_size *
                                                 sizeof(uint64_t));
            if (!new_on_disk_reftable) {
                res->check_errors++;
                ret = -ENOMEM;
                goto fail;
            }
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;
        }
        on_disk_reftable[refblock_index] = refblock_offset;

        /* If this is apparently the last refblock (for now), try to squeeze the
         * reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
        {
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
                                                          sizeof(uint64_t));
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;
                goto fail;
            }
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* The size of *refcount_table is always cluster-aligned, therefore the
         * write operation will not overflow */
        on_disk_refblock = (void *)((char *) *refcount_table +
                                    refblock_index * s->cluster_size);

        ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
                         on_disk_refblock, s->cluster_sectors);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;
    }
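
    /* Past this point, refblocks for all clusters currently tracked by the
     * IMRT have been written; only the reftable itself may still lack a
     * location on disk. */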
    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        }
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        goto write_refblocks;
    }
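
    /* At this point the reftable has a location and all refblocks (including
     * those covering the reftable's own clusters) are on disk; all that
     * remains is to write the reftable itself and to point the image header
     * at it. */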
    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    /* Enter new reftable into the image header */
    cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
                 reftable_offset);
    cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
                 size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader,
                                              refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
    if (ret < 0) {
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
        goto fail;
    }

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}

/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
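/* A note on the repair flow below: if the refcount structures need a rebuild
 * and BDRV_FIX_ERRORS is set, the refcounts are recalculated from scratch
 * after the rebuild, and a second, leak-fixing compare pass then cleans up
 * the old structures that rebuild_refcount_structure() intentionally
 * leaked. */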
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    void *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), compare_refcounts() is called again further down, in which
     * case this first result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);

    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,
                                         &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        res->corruptions = 0;
        res->leaks = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        rebuild = false;
        memset(refcount_table, 0, refcount_array_byte_size(s, nb_clusters));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
                                  &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
            if (rebuild) {
                fprintf(stderr, "ERROR rebuilt refcount structure is still "
                        "broken\n");
            }

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;
            *res = saved_res;
        }

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        }
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        }
        res->leaks += fresh_leaks;
    } else if (fix) {
        if (rebuild) {
            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
            res->check_errors++;
            ret = -EIO;
            goto fail;
        }

        if (res->leaks || res->corruptions) {
            *res = pre_compare_res;
            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                              refcount_table, nb_clusters);
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}

#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)
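
/* Note that overlaps_with() deliberately expands against the local variables
 * `offset` and `size` of qcow2_check_metadata_overlap() below; it is only
 * meant to be used inside that function. */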

/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_read failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);
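
    /* For example, with the default 64 kB clusters, a 100-byte write at
     * offset 65600 is widened to the cluster-aligned range [65536, 131072):
     * offset_into_cluster() yields 64, so size becomes
     * align_offset(64 + 100, 65536) == 65536, and offset is rounded down to
     * 65536. */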

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                          s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                              s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        for (i = 0; i < s->refcount_table_size; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz  = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
            uint64_t *l1 = g_try_malloc(l1_sz2);
            int ret;

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}
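
/* The positive return value above is a single QCow2MetadataOverlap bit;
 * callers can map it to a human-readable name via metadata_ol_names[] using
 * ctz32(), as qcow2_pre_write_overlap_check() does below. */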

static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
};

/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt,
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ctz32(ret);
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}
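
/* Typical usage, as in rebuild_refcount_structure() above: guard every write
 * of qcow2 metadata with this check before issuing the actual I/O:
 *
 *     ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
 *                                         s->cluster_size);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ... actually write the cluster at refblock_offset ...
 */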