/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
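/*
 * Illustration only (kept out of the build): how the non-exact growth
 * policy above expands the L1 table. Starting from a hypothetical
 * l1_size of 1, repeated DIV_ROUND_UP(x * 3, 2) steps yield
 * 1, 2, 3, 5, 8, 12, 18, ... until min_size is covered.
 */
#if 0
static int64_t example_grown_l1_size(int64_t cur_size, int64_t min_size)
{
    int64_t new_size = cur_size ? cur_size : 1;

    while (min_size > new_size) {
        new_size = DIV_ROUND_UP(new_size * 3, 2);
    }
    return new_size; /* e.g. example_grown_l1_size(1, 10) == 12 */
}
#endif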
/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}
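/*
 * Illustration only (kept out of the build): the start_of_slice
 * arithmetic above, written out with hypothetical numbers. With
 * 2048-entry slices, the entry at table index 5000 sits at slice
 * index 5000 % 2048 == 904, so its slice begins at table entry
 * 4096, i.e. byte 4096 * 8 == 32768 into the L2 table.
 */
#if 0
static uint64_t example_start_of_slice(uint64_t l2_index,
                                       uint64_t l2_slice_size)
{
    uint64_t slice_index = l2_index % l2_slice_size;

    return (l2_index - slice_index) * sizeof(uint64_t);
    /* example_start_of_slice(5000, 2048) == 32768 */
}
#endif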
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf), false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
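/*
 * Illustration only (kept out of the build): the sector rounding used
 * above. With 64 8-byte entries per 512-byte sector, L1 index 100
 * belongs to the sector starting at entry 100 & ~63 == 64, i.e. byte
 * 512 of the L1 table.
 */
#if 0
static int example_l1_sector_start(int l1_index)
{
    return l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    /* example_l1_sector_start(100) == 64 */
}
#endif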
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
        int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
    if (first_cluster_type == QCOW2_CLUSTER_UNALLOCATED) {
        return 0;
    }

    /* must be allocated */
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
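/*
 * Illustration only (kept out of the build): why masking each entry
 * with L2E_OFFSET_MASK | stop_flags detects a contiguous run. With a
 * hypothetical 64k cluster size, entries at host offsets 0x50000,
 * 0x60000 and 0x90000 give a run of 2; the third entry breaks the
 * offset + i * cluster_size progression (a changed stop flag would
 * break it the same way, since the flags are part of the mask).
 */
#if 0
static int example_count_contiguous(const uint64_t *entries, int n,
                                    uint64_t cluster_size, uint64_t mask)
{
    uint64_t first = entries[0] & mask;
    int i;

    for (i = 0; i < n; i++) {
        if ((entries[i] & mask) != first + (uint64_t)i * cluster_size) {
            break;
        }
    }
    return i;
}
#endif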
/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
                                                 int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t offset = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster));
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(bs, *cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(bs, nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *cluster_offset != offset - offset_in_cluster)
        {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}
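/*
 * Illustration only (kept out of the build): how a guest offset is
 * split for the lookup above, using hypothetical parameters (64k
 * clusters, 8192-entry L2 tables; the real driver derives these from
 * s->cluster_bits and s->l2_bits). Guest offset 0x23456789 gives
 * L1 index 0, L2 index 0x345 and in-cluster offset 0x6789.
 */
#if 0
static void example_decompose_guest_offset(uint64_t offset)
{
    const unsigned cluster_bits = 16;   /* hypothetical 64k clusters */
    const unsigned l2_bits = 13;        /* hypothetical 8192 L2 entries */
    uint64_t offset_in_cluster = offset & ((1ULL << cluster_bits) - 1);
    uint64_t l2_index = (offset >> cluster_bits) & ((1ULL << l2_bits) - 1);
    uint64_t l1_index = offset >> (cluster_bits + l2_bits);

    (void)offset_in_cluster; /* 0x6789 for offset 0x23456789 */
    (void)l2_index;          /* 0x345 */
    (void)l1_index;          /* 0 */
}
#endif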
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}
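/*
 * Illustration only (kept out of the build): the nb_csectors
 * computation above counts the extra 512-byte sectors spanned by the
 * compressed data beyond the one containing its first byte. E.g. 1000
 * bytes starting at host offset 0x503f0 end in sector 643 while
 * starting in sector 641, so nb_csectors == 2.
 */
#if 0
static int example_nb_csectors(uint64_t host_offset, int compressed_size)
{
    /* QCOW2_COMPRESSED_SECTOR_SIZE is 512 */
    return (host_offset + compressed_size - 1) / 512 - host_offset / 512;
    /* example_nb_csectors(0x503f0, 1000) == 2 */
}
#endif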
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
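/*
 * Illustration only (kept out of the build): the region layout handled
 * by perform_cow(). For a hypothetical 64k cluster written at guest
 * bytes [0x1000, 0x3000) within the cluster, the head COW region is
 * [0, 0x1000), the guest payload [0x1000, 0x3000) and the tail COW
 * region [0x3000, 0x10000); since the payload is <= 16384 bytes and
 * both regions are non-empty, the two reads are merged into one.
 */
#if 0
static void example_cow_regions(unsigned cluster_size,
                                unsigned write_start, unsigned write_end)
{
    unsigned head_bytes = write_start;              /* cow_start.nb_bytes */
    unsigned data_bytes = write_end - write_start;  /* guest payload */
    unsigned tail_bytes = cluster_size - write_end; /* cow_end.nb_bytes */
    bool merge_reads = head_bytes && tail_bytes && data_bytes <= 16384;

    (void)merge_reads; /* true for (0x10000, 0x1000, 0x3000) */
}
#endif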
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates separate cluster and writes data concurrently.
         * The first one to complete updates l2 table with pointer to its
         * cluster the second one has to do RMW (which is done above by
         * perform_cow()), update l2 table with its cluster pointer and free
         * old cluster. This is what this loop does */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
/*
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
                        QCOW2_DISCARD_NEVER);
}
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BlockDriverState *bs, int nb_clusters,
    uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
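/*
 * Illustration only (kept out of the build): the interval test used
 * above. Byte ranges [start, end) and [old_start, old_end) intersect
 * iff !(end <= old_start || start >= old_end); a new request that
 * begins before a running allocation is shortened to stop right at
 * old_start, otherwise it must wait.
 */
#if 0
static uint64_t example_shorten(uint64_t start, uint64_t bytes,
                                uint64_t old_start, uint64_t old_end)
{
    uint64_t end = start + bytes;

    if (end <= old_start || start >= old_end) {
        return bytes; /* no intersection, nothing to shorten */
    }
    return start < old_start ? old_start - start : 0;
    /* example_shorten(0, 0x30000, 0x20000, 0x40000) == 0x20000 */
}
#endif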
/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If *host_offset is
 * not INV_OFFSET, only physically contiguous clusters beginning at this host
 * offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
                                      == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(bs, cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != INV_OFFSET && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is not INV_OFFSET, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = INV_OFFSET;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(bs, entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (*host_offset == INV_OFFSET ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (alloc_cluster_offset == INV_OFFSET) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
                               start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        assert(alloc_cluster_offset != INV_OFFSET);
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-fight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);

    return 0;
}
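/*
 * Illustration only (kept out of the build): a hypothetical caller of
 * qcow2_alloc_cluster_offset(). Because the function may shorten
 * *bytes at dependency or allocation boundaries, a writer loops until
 * the whole request is mapped, writing each returned extent and then
 * linking the gathered QCowL2Meta list (error handling and locking
 * omitted; the real write path lives in qcow2.c).
 */
#if 0
static int example_write_loop(BlockDriverState *bs, uint64_t offset,
                              uint64_t bytes)
{
    while (bytes > 0) {
        unsigned int cur_bytes = MIN(bytes, INT_MAX);
        uint64_t host_offset;
        QCowL2Meta *l2meta = NULL;
        int ret;

        ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
                                         &host_offset, &l2meta);
        if (ret < 0) {
            return ret;
        }
        /* ... write cur_bytes of guest data at host_offset, then
         * call qcow2_alloc_cluster_link_l2() for each l2meta ... */
        offset += cur_bytes;
        bytes -= cur_bytes;
    }
    return 0;
}
#endif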
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(bs, old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}
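/*
 * Illustration only (kept out of the build): the L2 entry that
 * discard_in_l2_slice() leaves behind. With full_discard the entry is
 * zeroed, so reads fall through to the backing file again; otherwise,
 * on v3 images, the standalone zero flag keeps the area reading back
 * as zeroes.
 */
#if 0
static uint64_t example_discarded_entry(bool full_discard, int qcow_version)
{
    if (!full_discard && qcow_version >= 3) {
        return QCOW_OFLAG_ZERO; /* reads as zeroes */
    }
    return 0; /* unallocated: falls through to backing file */
}
#endif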
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(bs, old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(bs, l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size, true);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(s->data_file, offset,
                                         s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                            bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                            slice_offset, slice_size2, false);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, sizeof(uint64_t),
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}