block/qcow2-cluster.c
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;

fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
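/*
 * Illustrative note (not part of the driver): the growth loop above enlarges
 * the L1 table by roughly a factor of 1.5 per step, so one grow operation
 * covers several future grow requests. For example, starting from
 * l1_size == 1 with min_size == 20:
 *
 *     uint64_t sz = 1;
 *     while (20 > sz) {
 *         sz = DIV_ROUND_UP(sz * 3, 2);   // visits 2, 3, 5, 8, 12, 18, 27
 *     }
 */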
/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno on failure. On success, *l2_table points
 * to the L2 table.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                           (void **)l2_table);
}
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
                                (void **) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void **) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
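/*
 * Worked example (illustrative, not called by the driver): with 64 KiB
 * clusters, stop_flags == QCOW_OFLAG_ZERO, and three L2 entries pointing
 * at host offsets 0x50000, 0x60000 and 0x80000:
 *
 *     uint64_t l2[] = { cpu_to_be64(0x50000), cpu_to_be64(0x60000),
 *                       cpu_to_be64(0x80000) };
 *     int n = count_contiguous_clusters(3, 0x10000, l2, QCOW_OFLAG_ZERO);
 *     // n == 2: the third entry breaks host contiguity, because
 *     // 0x50000 + 2 * 0x10000 != 0x80000. A differing stop flag on any
 *     // later entry would end the scan the same way, since stop_flags is
 *     // part of the comparison mask.
 */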
/*
 * Checks how many consecutive unallocated clusters in a given L2
 * table have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_table,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_table[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t sector = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster))
                         >> BDRV_SECTOR_BITS;
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, sector, buffer,
                                  bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}
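/*
 * Illustrative note (not driver code): the IV sector number is derived from
 * either the host offset (crypt_physical_offset set) or the guest offset
 * (older AES-encrypted images). For a COW region at
 * offset_in_cluster == 0x2000 inside a cluster at host offset 0x70000:
 *
 *     int64_t sector = (0x70000 + 0x2000) >> BDRV_SECTOR_BITS;   // 0x390
 *
 * IVs are computed per 512-byte sector, which is why the asserts above
 * require sector-aligned offset_in_cluster and bytes.
 */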
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l1 entry */
    bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
                    + offset_in_cluster;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(*cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(nb_clusters,
                                                  &l2_table[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
    return ret;
}
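/*
 * Usage sketch (illustrative; the real caller is the qcow2 read path): map a
 * guest offset and clamp the request to the returned run of clusters of the
 * same type. Note that *cluster_offset is the start of the first host
 * cluster; the intra-cluster offset must be added by the caller.
 *
 *     unsigned int cur_bytes = 65536;
 *     uint64_t cluster_offset;
 *     int type = qcow2_get_cluster_offset(bs, offset, &cur_bytes,
 *                                         &cluster_offset);
 *     if (type < 0) {
 *         return type;                                   // -errno
 *     }
 *     if (type == QCOW2_CLUSTER_NORMAL) {
 *         // data lives at cluster_offset + offset_into_cluster(s, offset),
 *         // and cur_bytes of it are contiguous in the image file
 *     }
 */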
/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed) the l2 table.
 *
 * The l2 table offset in the qcow2 file and the cluster index in the l2
 * table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}
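/*
 * Worked example (illustrative): if qcow2_alloc_bytes() returns host offset
 * 0x503fe and compressed_size is 0x800, then
 *
 *     nb_csectors = ((0x503fe + 0x800 - 1) >> 9) - (0x503fe >> 9)
 *                 = 0x285 - 0x281
 *                 = 4
 *
 * The field thus counts the additional 512-byte sectors after the one
 * containing the first byte; qcow2_decompress_cluster() adds the missing 1
 * back when reading the entry.
 */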
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
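/*
 * Buffer layout sketch (illustrative): for an unaligned write into a newly
 * allocated cluster, the two COW regions bracket the guest data. With
 * merge_reads, the whole span is read into one buffer:
 *
 *     start_buffer                                        end_buffer
 *     |<- start->nb_bytes ->|<-- data_bytes -->|<- end->nb_bytes ->|
 *     [ old data before     |   guest range    | old data after    ]
 *
 * Without merge_reads, only the two outer regions are read, and end_buffer
 * is placed at start_buffer + buffer_size - end->nb_bytes, so the padding
 * in between keeps the end region memory-aligned.
 */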
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the l2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
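/*
 * Worked example (illustrative): suppose an in-flight allocation covers
 * guest bytes [old_start, old_end) = [0x30000, 0x50000) and a new request
 * arrives with guest_offset == 0x20000 and *cur_bytes == 0x40000:
 *
 *     start = 0x20000, end = 0x60000    -> intersects [0x30000, 0x50000)
 *     start < old_start                 -> bytes = 0x30000 - 0x20000
 *     *cur_bytes = 0x10000
 *
 * The caller handles the first 0x10000 bytes now and revisits the rest in
 * its next loop iteration. Had the request started inside the old range,
 * bytes would become 0 and the coroutine would wait (-EAGAIN).
 */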
/*
 * Checks how many already allocated clusters there are at the given
 * guest_offset (up to *bytes) that don't require a copy on write. If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#" PRIx64 " unaligned (guest offset: %#"
                                    PRIx64 ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = 0;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        int preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}
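/*
 * Walkthrough (illustrative): for a write that starts in an already-COPIED
 * cluster and extends into unallocated space, the loop above typically runs
 * as follows: handle_dependencies() leaves cur_bytes untouched,
 * handle_copied() consumes the COPIED prefix (ret == 1, continue), and the
 * next iteration's handle_alloc() allocates the tail and queues a QCowL2Meta
 * in *m. A caller would then look roughly like:
 *
 *     ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
 *                                      &cluster_offset, &l2meta);
 *     // ... write cur_bytes of guest data at cluster_offset ...
 *     // ... then qcow2_alloc_cluster_link_l2() for each queued meta ...
 */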
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
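/*
 * Note (illustrative): windowBits == -12 selects zlib's raw deflate mode
 * (no zlib header or checksum), matching how qcow2 compresses clusters on
 * the write side. A matching compressor sketch:
 *
 *     z_stream strm = { 0 };
 *     deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
 *                  -12, 9, Z_DEFAULT_STRATEGY);
 *     // ... deflate() the cluster data, then deflateEnd(&strm)
 *
 * Z_BUF_ERROR is tolerated above because the output buffer is exactly one
 * cluster: a valid compressed stream may stop right at the buffer boundary
 * before reporting Z_STREAM_END.
 */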
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;

        /* Allocate buffers on first decompress operation, most images are
         * uncompressed and the memory overhead can be avoided. The buffers
         * are freed in .bdrv_close().
         */
        if (!s->cluster_data) {
            /* one more sector for decompressed data alignment */
            s->cluster_data = qemu_try_blockalign(bs->file->bs,
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
            if (!s->cluster_data) {
                return -ENOMEM;
            }
        }
        if (!s->cluster_cache) {
            s->cluster_cache = g_malloc(s->cluster_size);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters, enum qcow2_discard_type type,
                             bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_single_l2(bs, offset, nb_clusters, type,
                                    full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
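/*
 * Note (illustrative): the two branches above implement the difference
 * between "zero, keep allocation" and "zero, drop allocation". For a normal
 * cluster entry 0x50000 | QCOW_OFLAG_COPIED:
 *
 *     without BDRV_REQ_MAY_UNMAP: entry |= QCOW_OFLAG_ZERO
 *         -> QCOW2_CLUSTER_ZERO_ALLOC, host cluster preserved
 *     with BDRV_REQ_MAY_UNMAP:    entry = QCOW_OFLAG_ZERO
 *         -> QCOW2_CLUSTER_ZERO_PLAIN, refcount dropped via
 *            qcow2_free_any_clusters()
 */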
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_single_l2(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                                  (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                            (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

            if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                continue;
            }

            if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                if (!bs->backing) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1,
                                        "Cluster allocation offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
                                      sizeof(uint64_t), BDRV_SECTOR_SIZE);

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}