/* block/qcow2-refcount.c */
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                                                   int64_t offset,
                                                   int64_t length,
                                                   int addend,
                                                   enum qcow2_discard_type type);
/*********************************************************/
/* refcount handling */

int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->refcount_table_size; i++) {
            be64_to_cpus(&s->refcount_table[i]);
        }
    }
    return 0;
 fail:
    return ret;
}
void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->refcount_table);
}
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          refcount_block);

    return ret;
}
/*
 * Returns the refcount of the cluster given by its index. Any non-negative
 * return value is the refcount of the cluster, negative values are -errno
 * and indicate an error.
 */
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    uint16_t *refcount_block;
    uint16_t refcount;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    refcount = be16_to_cpu(refcount_block[block_index]);

    ret = qcow2_cache_put(bs, s->refcount_block_cache,
                          (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    return refcount;
}
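
/*
 * Illustrative note (not part of the original source): assuming the default
 * 64 KiB clusters, refcounts are 16 bits wide, so one refcount block holds
 * 65536 / 2 = 32768 entries and refcount_block_bits is 15. Looking up
 * cluster_index = 70000 in get_refcount() then gives:
 *
 *     refcount_table_index = 70000 >> 15   = 2
 *     block_index          = 70000 & 32767 = 4464
 *
 * i.e. entry 4464 in the third refcount block.
 */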
/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
static unsigned int next_refcount_table_size(BDRVQcowState *s,
    unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}
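
/*
 * Illustrative note (not part of the original source): cluster_bits - 3 is
 * the number of 8-byte reftable entries per cluster, expressed as a shift
 * (with 64 KiB clusters, one cluster holds 2^13 = 8192 entries). The loop
 * above grows the table by roughly 1.5x per step, e.g.
 * 1 -> 2 -> 3 -> 5 -> 8 -> 12 clusters, so repeated refblock allocations
 * only rarely force a full reftable rewrite.
 */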
/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
    uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}
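
/*
 * Illustrative note (not part of the original source): each refcount block
 * covers 1 << (cluster_bits + refcount_block_bits) bytes of the image; with
 * the default sizes above that is 1 << (16 + 15) = 2 GiB. Two offsets share
 * a refcount block exactly when they fall into the same such window.
 */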
/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
    int64_t cluster_index, uint16_t **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       (void**) refcount_block);
        }
    }
    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */

    *refcount_block = NULL;
    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
        " at %" PRIx64 "\n",
        refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            (s->refcount_block_size - 1);
        (*refcount_block)[block_index] = cpu_to_be16(1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    if (ret < 0) {
        goto fail_block;
    }

    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far */
    uint64_t blocks_used = DIV_ROUND_UP(cluster_index, s->refcount_block_size);

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            ((table_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            ((meta_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size));

    } while (last_table_size != table_size);
#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
        s->refcount_table_size, table_size);
#endif

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    uint16_t *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {
        ret = -ENOMEM;
        goto fail_table;
    }

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
        s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        new_blocks[block++] = cpu_to_be16(1);
    }

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
        blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    new_blocks = NULL;
    if (ret < 0) {
        goto fail_table;
    }

    /* Write refcount table to disk */
    for (i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
        table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for (i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    uint8_t data[12];
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),
        data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, (void**) refcount_block);
    if (ret < 0) {
        return ret;
    }

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;

fail_table:
    g_free(new_blocks);
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    }
    return ret;
}
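
/*
 * Illustrative caller sketch (a summary, not code from this function): the
 * -EAGAIN contract above is consumed by retry loops of the form
 *
 *     do {
 *         offset = alloc_clusters_noref(bs, size);
 *         ...
 *         ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
 *     } while (ret == -EAGAIN);
 *
 * as in qcow2_alloc_clusters() below: if allocating the refcount block stole
 * the clusters the caller had picked, the whole search simply restarts.
 */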
void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_discard(bs->file,
                         d->offset >> BDRV_SECTOR_BITS,
                         d->bytes >> BDRV_SECTOR_BITS);
        }

        g_free(d);
    }
}
static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
            || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
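
/*
 * Illustrative note (not part of the original source): with 64 KiB clusters,
 * queueing [0x10000, +0x10000) while [0x20000, +0x10000) is already pending
 * takes the merge path above: new_start = 0x10000, new_end = 0x30000, and
 * new_end - new_start equals length + d->bytes, so the two regions collapse
 * into a single discard of [0x10000, +0x20000). A genuinely overlapping pair
 * would mean a cluster was freed twice, which the assertion rules out.
 */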
/* XXX: cache several refcount block clusters ? */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    uint16_t *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
           offset, length, addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (addend < 0) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
            s->l2_table_cache);
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                ret = qcow2_cache_put(bs, s->refcount_block_cache,
                                      (void**) &refcount_block);
                if (ret < 0) {
                    goto fail;
                }
            }

            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);

        refcount = be16_to_cpu(refcount_block[block_index]);
        refcount += addend;
        if (refcount < 0 || refcount > 0xffff) {
            ret = -EINVAL;
            goto fail;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        refcount_block[block_index] = cpu_to_be16(refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);
        }
    }

    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        int wret;
        wret = qcow2_cache_put(bs, s->refcount_block_cache,
                               (void**) &refcount_block);
        if (wret < 0) {
            return ret < 0 ? ret : wret;
        }
    }
    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend,
                                QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}
/*
 * Increases or decreases the refcount of a given cluster by one.
 * addend must be 1 or -1.
 *
 * If the return value is non-negative, it is the new refcount of the cluster.
 * If it is negative, it is -errno and indicates an error.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  int addend,
                                  enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          type);
    if (ret < 0) {
        return ret;
    }

    return get_refcount(bs, cluster_index);
}
/*********************************************************/
/* cluster allocation functions */

/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t i, nb_clusters;
    int refcount;

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        refcount = get_refcount(bs, next_cluster_index);

        if (refcount < 0) {
            return refcount;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in an int64_t */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}
int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
    int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_index;
    uint64_t i;
    int refcount, ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for(i = 0; i < nb_clusters; i++) {
            refcount = get_refcount(bs, cluster_index++);

            if (refcount < 0) {
                return refcount;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            return offset;
        }
        s->free_byte_offset = offset;
    }
redo:
    free_in_cluster = s->cluster_size -
        offset_into_cluster(s, s->free_byte_offset);
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0) {
            s->free_byte_offset = 0;
        }
        if (offset_into_cluster(s, offset) != 0) {
            qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
                                          QCOW2_DISCARD_NEVER);
        }
    } else {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            return offset;
        }
        cluster_offset = start_of_cluster(s, s->free_byte_offset);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
                                          QCOW2_DISCARD_NEVER);
            s->free_byte_offset += size;
        } else {
            s->free_byte_offset = offset;
            goto redo;
        }
    }

    /* The cluster refcount was incremented, either by qcow2_alloc_clusters()
     * or explicitly by qcow2_update_cluster_refcount(). Refcount blocks must
     * be flushed before the caller's L2 table updates.
     */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    return offset;
}
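
/*
 * Illustrative note (not part of the original source): assuming 64 KiB
 * clusters, three compressed writes of 20000 bytes each land at byte offsets
 * 0, 20000 and 40000 of the same cluster. The second and third calls take
 * the "enough space in current cluster" path and bump the host cluster's
 * refcount once per additional user, so freeing one compressed cluster later
 * only drops its share; the host cluster is released when the count hits 0.
 */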
void qcow2_free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size,
                          enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, -1, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}
/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO:
        if (l2_entry & L2E_OFFSET_MASK) {
            if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
                qcow2_signal_corruption(bs, false, -1, -1,
                                        "Cannot free unaligned cluster %#"
                                        PRIx64, l2_entry & L2E_OFFSET_MASK);
            } else {
                qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                    nb_clusters << s->cluster_bits, type);
            }
        }
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}
/*********************************************************/
/* snapshots and image creation */

/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2;
    bool l1_allocated = false;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors, refcount;
    int ret;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }

    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                old_offset = offset;
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                    case QCOW2_CLUSTER_COMPRESSED:
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        if (addend != 0) {
                            ret = update_refcount(bs,
                                (offset & s->cluster_offset_mask) & ~511,
                                nb_csectors * 512, addend,
                                QCOW2_DISCARD_SNAPSHOT);
                            if (ret < 0) {
                                goto fail;
                            }
                        }

                        /* compressed clusters are never modified */
                        refcount = 2;
                        break;

                    case QCOW2_CLUSTER_NORMAL:
                    case QCOW2_CLUSTER_ZERO:
                        if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
                            qcow2_signal_corruption(bs, true, -1, -1, "Data "
                                                    "cluster offset %#" PRIx64
                                                    " unaligned (L2 offset: %#"
                                                    PRIx64 ", L2 index: %#x)",
                                                    offset & L2E_OFFSET_MASK,
                                                    l2_offset, j);
                            ret = -EIO;
                            goto fail;
                        }

                        cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                        if (!cluster_index) {
                            /* unallocated */
                            refcount = 0;
                            break;
                        }
                        if (addend != 0) {
                            refcount = qcow2_update_cluster_refcount(bs,
                                    cluster_index, addend,
                                    QCOW2_DISCARD_SNAPSHOT);
                        } else {
                            refcount = get_refcount(bs, cluster_index);
                        }

                        if (refcount < 0) {
                            ret = refcount;
                            goto fail;
                        }
                        break;

                    case QCOW2_CLUSTER_UNALLOCATED:
                        refcount = 0;
                        break;

                    default:
                        abort();
                }
                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                }
                if (offset != old_offset) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                            s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                }
            }

            ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            if (addend != 0) {
                refcount = qcow2_update_cluster_refcount(bs, l2_offset >>
                        s->cluster_bits, addend, QCOW2_DISCARD_SNAPSHOT);
            } else {
                refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount < 0) {
                ret = refcount;
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }

    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_table, l1_size2);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated) {
        g_free(l1_table);
    }
    return ret;
}
/*********************************************************/
/* refcount checking functions */

/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         uint16_t **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, last, cluster_offset, k;

    if (size <= 0) {
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            int64_t old_refcount_table_size = *refcount_table_size;
            uint16_t *new_refcount_table;

            *refcount_table_size = k + 1;
            new_refcount_table = g_try_realloc(*refcount_table,
                                               *refcount_table_size *
                                               sizeof(**refcount_table));
            if (!new_refcount_table) {
                *refcount_table_size = old_refcount_table_size;
                res->check_errors++;
                return -ENOMEM;
            }
            *refcount_table = new_refcount_table;

            memset(*refcount_table + old_refcount_table_size, 0,
                   (*refcount_table_size - old_refcount_table_size) *
                   sizeof(**refcount_table));
        }

        if (++(*refcount_table)[k] == 0) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            res->corruptions++;
        }
    }

    return 0;
}
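
/*
 * Illustrative note (not part of the original source): the in-memory table
 * uses the same 16-bit entries as the on-disk refcount blocks, so
 * ++(*refcount_table)[k] wrapping to 0 means the 65536th reference to a
 * single cluster was seen; that cannot be represented on disk either, so it
 * is reported as a corruption rather than silently truncated.
 */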
/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};
/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
    uint16_t **refcount_table, int64_t *refcount_table_size, int64_t l2_offset,
    int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        goto fail;
    }

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                    "copied flag must never be set for compressed "
                    "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);
            if (ret < 0) {
                goto fail;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {
                break;
            }
            /* fall through */

        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                    "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    g_free(l2_table);
    return ret;
}
/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              uint16_t **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);
    if (ret < 0) {
        goto fail;
    }

    /* Read L1 table entries from disk */
    if (l1_size2 > 0) {
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            ret = -ENOMEM;
            res->check_errors++;
            goto fail;
        }
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
            goto fail;
        }
        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }

    /* Do the actual checks */
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    g_free(l1_table);
    return ret;
}
/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if get_refcount fails (this is because such an error will have
 * been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    int refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
        if (refcount < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%d\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" :
                                            "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                refcount = get_refcount(bs, data_offset >> s->cluster_bits);
                if (refcount < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%d\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" :
                                                    "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}
/*
 * Checks consistency of refblocks and accounts for each refblock in
 * *refcount_table.
 */
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix, bool *rebuild,
                           uint16_t **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i, size;
    int ret;

    for(i = 0; i < s->refcount_table_size; i++) {
        uint64_t offset, cluster;
        offset = s->refcount_table[i];
        cluster = offset >> s->cluster_bits;

        /* Refcount blocks are cluster aligned */
        if (offset_into_cluster(s, offset)) {
            fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
                "cluster aligned; refcount table entry corrupted\n", i);
            res->corruptions++;
            *rebuild = true;
            continue;
        }

        if (cluster >= *nb_clusters) {
            fprintf(stderr, "%s refcount block %" PRId64 " is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);

            if (fix & BDRV_FIX_ERRORS) {
                int64_t old_nb_clusters = *nb_clusters;
                uint16_t *new_refcount_table;

                if (offset > INT64_MAX - s->cluster_size) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                ret = bdrv_truncate(bs->file, offset + s->cluster_size);
                if (ret < 0) {
                    goto resize_fail;
                }
                size = bdrv_getlength(bs->file);
                if (size < 0) {
                    ret = size;
                    goto resize_fail;
                }

                *nb_clusters = size_to_clusters(s, size);
                assert(*nb_clusters >= old_nb_clusters);

                new_refcount_table = g_try_realloc(*refcount_table,
                                                   *nb_clusters *
                                                   sizeof(**refcount_table));
                if (!new_refcount_table) {
                    *nb_clusters = old_nb_clusters;
                    res->check_errors++;
                    return -ENOMEM;
                }
                *refcount_table = new_refcount_table;

                memset(*refcount_table + old_nb_clusters, 0,
                       (*nb_clusters - old_nb_clusters) *
                       sizeof(**refcount_table));

                if (cluster >= *nb_clusters) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                res->corruptions_fixed++;
                ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                    offset, s->cluster_size);
                if (ret < 0) {
                    return ret;
                }
                /* No need to check whether the refcount is now greater than 1:
                 * This area was just allocated and zeroed, so it can only be
                 * exactly 1 after inc_refcounts() */
                continue;

resize_fail:
                res->corruptions++;
                *rebuild = true;
                fprintf(stderr, "ERROR could not resize image: %s\n",
                        strerror(-ret));
            } else {
                res->corruptions++;
            }
            continue;
        }

        if (offset != 0) {
            ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                offset, s->cluster_size);
            if (ret < 0) {
                return ret;
            }
            if ((*refcount_table)[cluster] != 1) {
                fprintf(stderr, "ERROR refcount block %" PRId64
                        " refcount=%d\n", i, (*refcount_table)[cluster]);
                res->corruptions++;
                *rebuild = true;
            }
        }
    }

    return 0;
}
/*
 * Calculates an in-memory refcount table.
 */
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               uint16_t **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        *refcount_table = g_try_new0(uint16_t, *nb_clusters);
        if (*nb_clusters && *refcount_table == NULL) {
            res->check_errors++;
            return -ENOMEM;
        }
    }

    /* header */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            return ret;
        }
    }
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}
/*
 * Compares the actual reference count for each cluster in the image against
 * the refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              uint16_t *refcount_table, int64_t nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    int refcount1, refcount2, ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        if (refcount1 < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-refcount1));
            res->check_errors++;
            continue;
        }

        refcount2 = refcount_table[i];

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n",
                    num_fixed != NULL     ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                                            "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount2 - refcount1,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}
/*
 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
 * the on-disk refcount structures.
 *
 * On input, *first_free_cluster tells where to start looking, and need not
 * actually be a free cluster; the returned offset will not be before that
 * cluster. On output, *first_free_cluster points to the first gap found, even
 * if that gap was too small to be used as the returned offset.
 *
 * Note that *first_free_cluster is a cluster index whereas the return value is
 * an offset.
 */
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   int cluster_count,
                                   uint16_t **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * continuously free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
         cluster++)
    {
        if (!(*refcount_table)[cluster]) {
            contiguous_free_clusters++;
            if (first_gap) {
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
                first_gap = false;
            }
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;
        }
    }

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of continuously free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly to append free clusters at the end of the image */
    if (contiguous_free_clusters < cluster_count) {
        int64_t old_imrt_nb_clusters = *imrt_nb_clusters;
        uint16_t *new_refcount_table;

        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        *imrt_nb_clusters = cluster + cluster_count - contiguous_free_clusters;
        new_refcount_table = g_try_realloc(*refcount_table,
                                           *imrt_nb_clusters *
                                           sizeof(**refcount_table));
        if (!new_refcount_table) {
            *imrt_nb_clusters = old_imrt_nb_clusters;
            return -ENOMEM;
        }
        *refcount_table = new_refcount_table;

        memset(*refcount_table + old_imrt_nb_clusters, 0,
               (*imrt_nb_clusters - old_imrt_nb_clusters) *
               sizeof(**refcount_table));
    }

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        (*refcount_table)[cluster + i] = 1;
    }

    return cluster << s->cluster_bits;
}
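
/*
 * Illustrative note (not part of the original source): for a hypothetical
 * IMRT of [1, 1, 0, 0, 1, 0, 0, 0] queried with cluster_count = 3 and
 * *first_free_cluster = 0, the scan skips the two-cluster gap at index 2,
 * marks clusters 5..7 as used, returns offset 5 << cluster_bits, and leaves
 * *first_free_cluster = 2, the first gap seen during the scan.
 */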
/*
 * Creates a new refcount structure based solely on the in-memory information
 * given through *refcount_table. All necessary allocations will be reflected
 * in that array.
 *
 * On success, the old refcount structure is leaked (it will be covered by the
 * new refcount structure).
 */
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      uint16_t **refcount_table,
                                      int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    uint16_t *on_disk_refblock;
    int i, ret = 0;
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);

write_refblocks:
    for (; cluster < *nb_clusters; cluster++) {
        if (!(*refcount_table)[cluster]) {
            continue;
        }

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;

        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        }
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters, &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;
            goto fail;
        }

        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;

            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                 reftable_size *
                                                 sizeof(uint64_t));
            if (!new_on_disk_reftable) {
                res->check_errors++;
                ret = -ENOMEM;
                goto fail;
            }
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;
        }
        on_disk_reftable[refblock_index] = refblock_offset;
        /* If this is apparently the last refblock (for now), try to squeeze the
         * reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
        {
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
                                                             sizeof(uint64_t));
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;
                goto fail;
            }
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        on_disk_refblock = qemu_blockalign0(bs->file, s->cluster_size);
        for (i = 0; i < s->refcount_block_size &&
                    refblock_start + i < *nb_clusters; i++)
        {
            on_disk_refblock[i] =
                cpu_to_be16((*refcount_table)[refblock_start + i]);
        }

        ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
                         (void *)on_disk_refblock, s->cluster_sectors);
        qemu_vfree(on_disk_refblock);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;
    }

    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        }
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        goto write_refblocks;
    }

    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    /* Enter new reftable into the image header */
    cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
                 reftable_offset);
    cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
                 size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader,
                                              refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
    if (ret < 0) {
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
        goto fail;
    }

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}
/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    uint16_t *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), this function is immediately called again, in which case the
     * result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);
2009 if (rebuild && (fix & BDRV_FIX_ERRORS)) {
2010 BdrvCheckResult old_res = *res;
2011 int fresh_leaks = 0;
2013 fprintf(stderr, "Rebuilding refcount structure\n");
2014 ret = rebuild_refcount_structure(bs, res, &refcount_table,
2015 &nb_clusters);
2016 if (ret < 0) {
2017 goto fail;
2020 res->corruptions = 0;
2021 res->leaks = 0;
2023 /* Because the old reftable has been exchanged for a new one the
2024 * references have to be recalculated */
2025 rebuild = false;
2026 memset(refcount_table, 0, nb_clusters * sizeof(uint16_t));
2027 ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
2028 &nb_clusters);
2029 if (ret < 0) {
2030 goto fail;
2033 if (fix & BDRV_FIX_LEAKS) {
2034 /* The old refcount structures are now leaked, fix it; the result
2035 * can be ignored, aside from leaks which were introduced by
2036 * rebuild_refcount_structure() that could not be fixed */
2037 BdrvCheckResult saved_res = *res;
2038 *res = (BdrvCheckResult){ 0 };
2040 compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
2041 &highest_cluster, refcount_table, nb_clusters);
2042 if (rebuild) {
2043 fprintf(stderr, "ERROR rebuilt refcount structure is still "
2044 "broken\n");
2047 /* Any leaks accounted for here were introduced by
2048 * rebuild_refcount_structure() because that function has created a
2049 * new refcount structure from scratch */
2050 fresh_leaks = res->leaks;
2051 *res = saved_res;
2054 if (res->corruptions < old_res.corruptions) {
2055 res->corruptions_fixed += old_res.corruptions - res->corruptions;
2057 if (res->leaks < old_res.leaks) {
2058 res->leaks_fixed += old_res.leaks - res->leaks;
2060 res->leaks += fresh_leaks;
2061 } else if (fix) {
2062 if (rebuild) {
2063 fprintf(stderr, "ERROR need to rebuild refcount structures\n");
2064 res->check_errors++;
2065 ret = -EIO;
2066 goto fail;
2069 if (res->leaks || res->corruptions) {
2070 *res = pre_compare_res;
2071 compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
2072 refcount_table, nb_clusters);
2076 /* check OFLAG_COPIED */
2077 ret = check_oflag_copied(bs, res, fix);
2078 if (ret < 0) {
2079 goto fail;
2082 res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
2083 ret = 0;
2085 fail:
2086 g_free(refcount_table);
2088 return ret;
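
/*
 * Usage sketch (illustrative, not part of the original file): this checker
 * is normally reached through bdrv_check(), e.g. from "qemu-img check".
 * Assuming a BlockDriverState *bs opened on a qcow2 image, a repairing
 * check roughly amounts to:
 *
 *     BdrvCheckResult result = {0};
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
 *     if (ret < 0) {
 *         // internal error (result.check_errors has been incremented)
 *     } else if (result.corruptions || result.leaks) {
 *         // inconsistencies remain even after the repair attempt
 *     }
 */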

#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)
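
/* Note on the macro above (added for clarity): it intentionally captures the
 * caller's local variables 'offset' and 'size', so for example
 *     overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))
 * expands to
 *     ranges_overlap(offset, size, s->l1_table_offset,
 *                    s->l1_size * sizeof(uint64_t))
 * and is therefore only meaningful inside qcow2_check_metadata_overlap(). */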

/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (being a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);
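
    /* Worked example (hypothetical numbers): with 64 KiB clusters
     * (cluster_size == 0x10000), offset == 0x12345 and size == 0x100 become
     * offset == 0x10000 and size == 0x10000, i.e. the whole containing
     * cluster [0x10000, 0x20000) is tested for overlaps below. */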

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                    s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                    s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        for (i = 0; i < s->refcount_table_size; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                    s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz  = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
            uint64_t *l1 = g_try_malloc(l1_sz2);
            int ret;

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}
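
/*
 * Example (illustrative values): probing the very first cluster always
 * reports the header, assuming the header check is enabled in
 * s->overlap_check and not masked out via ign:
 *
 *     ret = qcow2_check_metadata_overlap(bs, 0, 0, 1);
 *     // ret == QCOW2_OL_MAIN_HEADER (a single set bit; see the names below)
 */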

static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
};

/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ffs(ret) - 1;
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);
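
        /* Example (added for clarity): if ret == QCOW2_OL_ACTIVE_L2, which
         * equals 1 << QCOW2_OL_ACTIVE_L2_BITNR, then ffs(ret) - 1 recovers
         * the bit number and metadata_ol_names[] yields "active L2 table"
         * for the message below. */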
        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}
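
/*
 * Typical call pattern (a sketch; the variable names are hypothetical): a
 * writer checks its target range first, possibly ignoring the section it is
 * deliberately rewriting, e.g. when updating an active L2 table in place:
 *
 *     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
 *                                         l2_offset, s->cluster_size);
 *     if (ret < 0) {
 *         return ret; // on overlap, the image was marked corrupt (-EIO)
 *     }
 *     ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
 */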