/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed.
 * On success, *l2_table points to the loaded L2 table.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
        (void**) l2_table);

    return ret;
}
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
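/*
 * Illustrative example: L1_ENTRIES_PER_SECTOR is 64, so for l1_index == 100
 * the code above computes l1_start_index == (100 & ~63) == 64 and rewrites
 * the whole 512-byte sector holding entries 64..127, i.e. the sector at byte
 * offset s->l1_table_offset + 8 * 64 in the image file.
 */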
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    assert(qcow2_get_cluster_type(first_entry) == QCOW2_CLUSTER_NORMAL);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
static int count_contiguous_clusters_by_type(int nb_clusters,
                                             uint64_t *l2_table,
                                             int wanted_type)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}
static int coroutine_fn do_perform_cow(BlockDriverState *bs,
                                       uint64_t src_cluster_offset,
                                       uint64_t cluster_offset,
                                       int offset_in_cluster,
                                       int bytes)
{
    BDRVQcow2State *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int ret;

    iov.iov_len = bytes;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  bytes, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    if (bs->encrypted) {
        Error *err = NULL;
        int64_t sector = (src_cluster_offset + offset_in_cluster)
                         >> BDRV_SECTOR_BITS;
        assert(s->cipher);
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        if (qcow2_encrypt_sectors(s, sector, iov.iov_base, iov.iov_base,
                                  bytes >> BDRV_SECTOR_BITS, true, &err) < 0) {
            ret = -EIO;
            error_free(err);
            goto out;
        }
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, bytes);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          bytes, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
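/*
 * Layout sketch (illustrative): a partial write into a newly allocated
 * cluster leaves up to two regions that must be copied from the old
 * location, one before and one after the guest data:
 *
 *   new cluster:  |-- cow_start --|==== guest write ====|--- cow_end ---|
 *
 * perform_cow() below calls this function once per region; a region with
 * nb_bytes == 0 is skipped.
 */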
/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l1 entry */
    bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
                    + offset_in_cluster;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_UNALLOCATED);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}
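/*
 * Illustrative example: with 64 KiB clusters (cluster_bits == 16) an L2
 * table holds 8192 entries (l2_bits == 13), so l1_bits == 29 and one L1
 * entry covers 2^29 bytes == 512 MiB. The bytes_available clamp above
 * therefore shortens any request so that it never crosses such a boundary,
 * and the caller is expected to retry for the remainder.
 */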
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
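/*
 * Illustrative example: with cluster_bits == 16 and l2_bits == 13, a guest
 * offset of 0x23456789 decomposes as
 *
 *     l1_index          = offset >> 29                = 0x1
 *     l2_index          = (offset >> 16) & (8192 - 1) = 0x345
 *     offset in cluster = offset & 0xffff             = 0x6789
 *
 * which is exactly the math used by get_cluster_table() above.
 */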
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful; return 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}
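/*
 * Entry layout sketch (illustrative, for cluster_bits == 16): csize_shift
 * is 62 - (cluster_bits - 8) == 54, so the compressed L2 entry packs
 *
 *     bits  0..53   host byte offset of the compressed data
 *     bits 54..61   number of occupied 512-byte sectors, minus one
 *     bit  62       QCOW_OFLAG_COMPRESSED
 *
 * qcow2_decompress_cluster() below undoes this encoding using csize_shift
 * and csize_mask.
 */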
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (r->nb_bytes == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = do_perform_cow(bs, m->offset, m->alloc_offset, r->offset, r->nb_bytes);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
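/*
 * Walkthrough (illustrative): suppose a request covers guest bytes
 * [0, 128K) while an in-flight allocation COWs [64K, 192K). The ranges
 * intersect and start < old_start, so bytes is shortened to 64K and the
 * caller handles [64K, 128K) in a later iteration. A second request
 * starting at 64K would instead compute bytes == 0 and wait on
 * old_alloc->dependent_requests, returning -EAGAIN.
 */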
/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);
    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
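/*
 * Note (illustrative): windowBits == -12 selects a raw deflate stream (no
 * zlib header or checksum) with a 4 KiB window, matching the parameters
 * used when compressed clusters are written. Z_BUF_ERROR is tolerated
 * above because the input buffer may extend past the end of the actual
 * compressed stream; the decode only counts as successful if exactly
 * out_buf_size bytes were produced.
 */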
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters, enum qcow2_discard_type type,
                             bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset;
    uint64_t nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED || flags & BDRV_REQ_MAY_UNMAP) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors,
                        int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters, flags);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                            (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
                                      sizeof(uint64_t), BDRV_SECTOR_SIZE);

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}