/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }
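
    /*
     * Illustration (hypothetical sizes): with the 3/2 growth factor above,
     * an L1 table growing from 1 entry towards min_size = 10 passes through
     * 1 -> 2 -> 3 -> 5 -> 8 -> 12, so the table is over-allocated to 12
     * entries and the next few grow requests become no-ops.
     */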

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;

fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}
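
/*
 * Worked example for l2_load() (illustrative numbers, not defaults): with
 * 2048-entry slices, a guest offset whose full L2 index is 5000 has
 * offset_to_l2_slice_index() == 5000 % 2048 == 904, so start_of_slice is
 * 8 * (5000 - 904) == 32768 and the cache is asked for the third slice of
 * the table (entries 4096..6143).
 */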

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
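
    /*
     * For example: with 64 entries per sector, an l1_index of 70 is rounded
     * down to l1_start_index 64, so the sector covering L1 entries 64..127
     * is rewritten as a whole.
     */
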
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf), false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
        int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
    if (first_cluster_type == QCOW2_CLUSTER_UNALLOCATED) {
        return 0;
    }

    /* must be allocated */
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
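
/*
 * Illustration for count_contiguous_clusters() (hypothetical entries): with
 * 64 KiB clusters and a first entry pointing at host offset 0x50000, the
 * loop keeps counting while entry i, masked with 'mask', equals
 * 0x50000 + i * 0x10000. An entry whose host offset is 0x90000 in slot 3
 * (expected: 0x80000), or whose bits selected by stop_flags differ from
 * the first entry's, ends the run.
 */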

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
                                                 int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_preadv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcow2_co_encrypt(bs, cluster_offset,
                             src_cluster_offset + offset_in_cluster,
                             buffer, bytes) < 0) {
            return false;
        }
    }
    return true;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;
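
    /*
     * Illustration (hypothetical geometry): with 64 KiB clusters and
     * 2048-entry slices, an offset whose slice index is 2045 leaves
     * 2048 - 2045 = 3 entries in this slice, so bytes_available is
     * 3 << 16 = 192 KiB.
     */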

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(bs, *cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(bs, nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *cluster_offset != offset - offset_in_cluster)
        {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    " (L2 index: %#x)", *cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);
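
    /*
     * Illustration (hypothetical values): with cluster_offset = 0x10300 and
     * compressed_size = 1300, the compressed data occupies bytes
     * [0x10300, 0x10813], i.e. 512-byte sectors 129 through 132, so
     * nb_csectors = 132 - 129 = 3 sectors in addition to the first one.
     */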

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }
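
    /*
     * Resulting buffer layouts (region sizes are hypothetical):
     *
     *   merged read:    | start region | guest data area | end region |
     *   separate reads: | start region | pad to 'align'  | end region |
     *
     * Either way, end_buffer (computed below) points at the final
     * end->nb_bytes bytes of the buffer.
     */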

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the l2 table with its cluster
         * pointer and free the old cluster. This is what this loop does */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
                        QCOW2_DISCARD_NEVER);
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BlockDriverState *bs, int nb_clusters,
    uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
                               uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
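
/*
 * Overlap illustration for handle_dependencies() (hypothetical offsets):
 * a request covering guest bytes [100k, 200k) that meets an in-flight
 * allocation whose COW area spans [150k, 250k) intersects it, and since
 * start < old_start the request is shortened to bytes = 50k; the caller
 * then handles [150k, 200k) in a later iteration. Had the request started
 * at 150k or beyond, bytes would drop to 0 and the coroutine would wait
 * for the dependency (returning -EAGAIN).
 */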

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If *host_offset is
 * not INV_OFFSET, only physically contiguous clusters beginning at this host
 * offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
           == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(bs, cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != INV_OFFSET && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is not INV_OFFSET, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = INV_OFFSET;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(bs, entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (*host_offset == INV_OFFSET ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (alloc_cluster_offset == INV_OFFSET) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
                               start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        assert(alloc_cluster_offset != INV_OFFSET);
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
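
    /*
     * Worked example (hypothetical values): with 64 KiB clusters, a request
     * at guest_offset = 0x11200 (4608 bytes into its cluster) for
     * *bytes = 100000, and two newly allocated clusters:
     *
     *   requested_bytes = 100000 + 4608       = 104608
     *   avail_bytes     = 2 * 65536           = 131072
     *   nb_bytes        = MIN(104608, 131072) = 104608
     *
     * cow_start then covers the first 4608 bytes of the allocation and
     * cow_end covers bytes [104608, 131072).
     */
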
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);

    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);
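
    /*
     * For instance (hypothetical geometry): with 2048-entry slices, an
     * offset whose l2_index is 2000 limits this call to at most 48
     * clusters; qcow2_cluster_discard() below loops until the whole
     * range has been covered.
     */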

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(bs, old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(bs, old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(bs, l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size, true);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(s->data_file, offset,
                                         s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2, false);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, sizeof(uint64_t),
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}