/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }
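
    /* Illustrative example (not from the original source): growing towards
     * min_size = 10 from an initial size of 1 visits 1 -> 2 -> 3 -> 5 -> 8
     * -> 12, i.e. the table grows by roughly 1.5x per step, so only
     * O(log min_size) grow operations are ever needed. */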

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table,
                           new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, or a negative errno value if the read from the
 * image file failed. On success, *l2_table points to the L2 table.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
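    /* Example: L1 entries are 8 bytes each, so one 512-byte sector holds 64
     * of them; an l1_index of 70 rounds down to l1_start_index 64, and the
     * whole sector covering entries 64..127 is rewritten below. */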
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows the search, for example, to stop at the first
 * compressed cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;
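    /* Note: two entries are counted as contiguous only if their host offsets
     * differ by exactly cluster_size and every bit selected by the mask (the
     * compressed flag and any stop_flags, e.g. QCOW_OFLAG_ZERO) matches that
     * of the first entry. */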

    if (!offset)
        return 0;

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
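        /* IV layout: bytes 0..7 hold the little-endian sector number, bytes
         * 8..15 are zero. A fresh IV per 512-byte sector is what allows
         * random access to individual encrypted sectors. */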
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (bs->encrypted) {
        assert(s->crypt_method);
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;
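    /* For example, with the default 64 KiB clusters (cluster_bits = 16) an
     * L2 table holds 8192 entries (l2_bits = 13), so l1_bits = 29 and each
     * L1 entry covers 512 MiB of guest address space. */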

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);
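
    /* The descriptor built here packs the host byte offset into the low
     * s->csize_shift bits and, above them, the number of occupied 512-byte
     * sectors minus one; qcow2_decompress_cluster() below adds that one back
     * when it decodes the entry. */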

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes not-yet-allocated
 * space, which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);
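        /* The two byte ranges are treated as half-open intervals: [start,
         * end) and [old_start, old_end) only conflict if each one starts
         * before the other ends, so e.g. a request for [0, 0x10000) does not
         * clash with an in-flight allocation at [0x10000, 0x20000). */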
        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
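
    /* Example: with 64 KiB clusters, guest_offset = 0x11000 (4 KiB into its
     * cluster) and *bytes = 128 KiB give size_to_clusters(s, 0x21000) = 3,
     * because the unaligned head makes the request span three clusters. */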

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
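
    /* Worked example (illustrative numbers): with 64 KiB clusters, a write
     * of 8 KiB starting 4 KiB into a single newly allocated cluster gives
     * alloc_n_start = 8, requested_sectors = nb_sectors = 24 and
     * avail_sectors = 128, so cow_start covers the 4 KiB head of the cluster
     * and cow_end the 52 KiB tail after the write. */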

    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = (uint64_t)*num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;
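
    /* Note: a negative windowBits value tells zlib to expect raw deflate
     * data with no zlib header or trailing checksum; 12 is the log2 of the
     * window size, matching the parameters qcow2 uses when compressing
     * clusters. */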
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing_hd) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);
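
    /* Partially covered clusters at both ends are left alone: e.g. with
     * 64 KiB clusters, a discard of [0x1000, 0x31000) is reduced to the
     * fully covered range [0x10000, 0x30000). */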

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }
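
    /* Note the asymmetry above: a compressed cluster cannot carry the zero
     * flag (its low bits are part of the compressed descriptor), so its
     * entry is replaced outright and the compressed data freed, while a
     * normal cluster keeps its allocation and merely gets the zero flag set,
     * ready to be reused by a later write. */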

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                        (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;
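        /* i.e. the snapshot's L1 table size rounded up to whole sectors, so
         * that it can be read with the sector-granular bdrv_read() below */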

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}