/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If writing the L1 table failed, the image may contain a partially
     * overwritten L1 table. In this case it is better to clear the
     * L1 table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}
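/*
 * Example (hypothetical numbers): shrinking an L1 table from l1_size = 8
 * down to exact_size = 2 first zeroes entries 2..7 on disk (6 * 8 = 48
 * bytes starting at l1_table_offset + 16), flushes, and only then frees
 * the L2 tables those entries pointed to. Zeroing before freeing means a
 * crash in between can at worst leak clusters; it never leaves the L1
 * table pointing at freed ones.
 */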
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2);
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, new_l1_size2);

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}
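/*
 * Worked example of the slice arithmetic above, assuming 64 KiB clusters
 * (8192 L2 entries per table) and a cache that splits each table into
 * 4096-entry slices: for an offset whose index within the full L2 table
 * is 5000, offset_to_l2_slice_index() yields 5000 % 4096 = 904, so
 * start_of_slice = 8 * (5000 - 904) = 32768, i.e. the second slice of
 * the table. The cache is then asked for l2_offset + 32768.
 */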
/*
 * Writes an L1 entry to disk (note that depending on the alignment
 * requirements this function may write more than just one entry in
 * order to prevent bdrv_pwrite from performing a read-modify-write)
 */
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    int l1_start_index;
    int i, ret;
    int bufsize = MAX(sizeof(uint64_t),
                      MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
    int nentries = bufsize / sizeof(uint64_t);
    g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);

    if (buf == NULL) {
        return -ENOMEM;
    }

    l1_start_index = QEMU_ALIGN_DOWN(l1_index, nentries);
    for (i = 0; i < MIN(nentries, s->l1_size - l1_start_index); i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, bufsize, false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, bufsize);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
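/*
 * Example: with a 4096-byte request_alignment and 8-byte entries, bufsize
 * is 4096 and nentries is 512, so updating l1_index 1000 rewrites entries
 * 512..1023 in one aligned write instead of letting bdrv_pwrite perform a
 * read-modify-write cycle for a single 8-byte update.
 */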
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require different handling)
 */
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
        int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
    if (first_cluster_type == QCOW2_CLUSTER_UNALLOCATED) {
        return 0;
    }

    /* must be allocated */
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
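/*
 * Example: with stop_flags == QCOW_OFLAG_ZERO, the mask keeps the host
 * offset bits plus the zero and compressed flags. Entries pointing at
 * host offsets 0x50000, 0x60000, 0x70000 (64 KiB clusters, same flags)
 * satisfy offset + i * cluster_size == l2_entry for i = 0, 1, 2, so the
 * function returns 3; a change in the zero flag or a gap in the host
 * offsets ends the run early.
 */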
/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
                                                 int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv_part(bs,
                                       src_cluster_offset + offset_in_cluster,
                                       qiov->size, qiov, 0, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(bs, *cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(bs, nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *cluster_offset != offset - offset_in_cluster)
        {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}
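/*
 * Usage sketch (hypothetical caller): to serve a guest read at offset
 * 0x123456 with *bytes = 65536, this function might return
 * QCOW2_CLUSTER_NORMAL with *cluster_offset set to the host cluster base
 * and *bytes shrunk to the contiguous run; the caller then reads from
 * *cluster_offset + offset_in_cluster. The returned type decides whether
 * to read the data file, the backing file, or to produce zeroes.
 */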
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    /* The offset and size must fit in their fields of the L2 table entry */
    assert((cluster_offset & s->cluster_offset_mask) == cluster_offset);
    assert((nb_csectors & s->csize_mask) == nb_csectors);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}
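/*
 * Entry-format example: for 64 KiB clusters, csize_shift is
 * 62 - (cluster_bits - 8) = 54, so a compressed L2 entry packs the host
 * byte offset into the low 54 bits, the number of additional 512-byte
 * sectors occupied by the compressed data (nb_csectors) into the bits
 * above it, and sets bit 62 (QCOW_OFLAG_COMPRESSED).
 */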
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ?
                                qemu_iovec_subvec_niov(m->data_qiov,
                                                       m->data_qiov_offset,
                                                       data_bytes)
                                : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + start->offset,
                               m->offset + start->offset,
                               start_buffer, start->nb_bytes);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + end->offset,
                               m->offset + end->offset,
                               end_buffer, end->nb_bytes);
        if (ret < 0) {
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);

    return ret;
}
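/*
 * Region layout example: for a write of 4 KiB at 1 KiB into a fresh
 * 64 KiB cluster, cow_start covers bytes 0..1023, the guest data covers
 * 1024..5119, and cow_end covers 5120..65535. data_bytes is 4096, so the
 * two COW reads are merged into one buffer (4096 <= 16384) and, when the
 * guest qiov is available, a single write covers the whole cluster.
 */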
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        uint64_t offset = cluster_offset + (i << s->cluster_bits);
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        /* The offset must fit in the offset field of the L2 table entry */
        assert((offset & L2E_OFFSET_MASK) == offset);

        l2_slice[l2_index + i] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
/*
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    if (!has_data_file(bs) && !m->keep_old_clusters) {
        qcow2_free_clusters(bs, m->alloc_offset,
                            m->nb_clusters << s->cluster_bits,
                            QCOW2_DISCARD_NEVER);
    }
}
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BlockDriverState *bs, int nb_clusters,
    uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
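/*
 * Interval example: if a request covers guest bytes [0, 192K) and an
 * in-flight allocation COWs [128K, 256K), the ranges intersect and
 * start < old_start, so bytes is shortened to 128K and the caller handles
 * the overlapping tail in a later iteration. Had the request started
 * inside [128K, 256K), bytes would drop to 0 and the coroutine would
 * block on dependent_requests, returning -EAGAIN once woken up.
 */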
/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If *host_offset is
 * not INV_OFFSET, only physically contiguous clusters beginning at this host
 * offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
                                      == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(bs, cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != INV_OFFSET && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
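/*
 * Two modes, by example: with *host_offset == INV_OFFSET the allocator is
 * free to place the clusters anywhere (qcow2_alloc_clusters); with a
 * required host offset, qcow2_alloc_clusters_at() may return fewer
 * contiguous clusters than requested, and *nb_clusters is trimmed to that
 * count (possibly 0 if the first required cluster is already in use).
 */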
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is not INV_OFFSET, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = INV_OFFSET;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Limit total allocation byte count to INT_MAX */
    nb_clusters = MIN(nb_clusters, INT_MAX >> s->cluster_bits);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);
    nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(bs, entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (*host_offset == INV_OFFSET ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (alloc_cluster_offset == INV_OFFSET) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
                               start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        assert(alloc_cluster_offset != INV_OFFSET);
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = nb_clusters << s->cluster_bits;
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);

    return 0;
}
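/*
 * Loop walkthrough (hypothetical request): a 256 KiB write may be served
 * as 128 KiB of already-COPIED clusters via handle_copied(), then get
 * shortened by handle_dependencies() at an in-flight allocation, then be
 * completed by handle_alloc(), which queues one QCowL2Meta per allocated
 * run; the *m list links them so qcow2_alloc_cluster_link_l2() can update
 * the L2 entries once the data (and COW) writes have finished.
 */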
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(bs, old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}
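/*
 * Example of the two discard flavours: with full_discard == false on a v3
 * image, a NORMAL cluster's entry is replaced by QCOW_OFLAG_ZERO, so the
 * range keeps reading back as zeroes; with full_discard == true the entry
 * becomes 0 and reads fall through to the backing file, which is what
 * callers use when the area should show the backing content again.
 */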
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(bs, old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}
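/*
 * Example: zeroing one 64 KiB cluster that is currently NORMAL without
 * BDRV_REQ_MAY_UNMAP only ORs QCOW_OFLAG_ZERO into its entry, keeping the
 * allocation (ZERO_ALLOC); with MAY_UNMAP set, the entry is replaced by
 * the plain zero flag and the host cluster is freed.
 */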
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(bs, l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    /* The offset must fit in the offset field */
                    assert((offset & L2E_OFFSET_MASK) == offset);

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                            bs, offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size, true);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(s->data_file, offset,
                                         s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2, false);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, sizeof(uint64_t),
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}