/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }
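
    /* For illustration: with 8-byte entries this rejects any min_size above
     * INT_MAX / 8 entries (a 2 GB L1 table) before the growth loop below
     * could overflow new_l1_size. */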

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }
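
    /* A worked example of the 3/2 growth above, starting from an empty
     * table: 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 -> ..., so repeated small
     * grow requests trigger only a logarithmic number of reallocations. */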

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table,
                           new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
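
    /* Note: at this point the header already points at the new table while
     * the old one is still allocated. A crash here leaks the old L1 clusters
     * at worst (qemu-img check can report and reclaim them); it never
     * corrupts data. */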

    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, or a negative error code if the read from the
 * image file failed.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
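
/* For illustration: with 512-byte sectors and 8-byte entries,
 * L1_ENTRIES_PER_SECTOR is 64, so updating l1_index 100 rewrites the
 * sector that holds entries 64..127 in a single write. */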

int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocates a new L2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */
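
/* For context: an L1 entry loses QCOW_OFLAG_COPIED when its L2 table becomes
 * shared, e.g. after an internal snapshot is taken; the next write through
 * such an entry ends up here and duplicates the shared table first. */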

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
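
/* For illustration: called with stop_flags == QCOW_OFLAG_ZERO, a run of
 * normal clusters ends at the first zero cluster or at the first entry whose
 * host offset breaks the "offset + i * cluster_size" progression. */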

static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset)
        return 0;

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
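
/* Note on the IV scheme above: the 16-byte CBC IV for sector n is simply the
 * little-endian sector number in its first 8 bytes and zeroes in the rest,
 * which is what makes this compatible with cryptoloop. */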

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        iov.iov_base, iov.iov_base, n, 1,
                        &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
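
/* For illustration, assuming the default 64 KiB clusters: cluster_bits is 16
 * and l2_bits is 13 (8192 eight-byte entries per L2 table), so l1_bits below
 * becomes 29 and each L1 entry covers 512 MiB of guest address space. */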

int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed) the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
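
/* For illustration, again with 64 KiB clusters: a guest offset of 5 GiB gives
 * l1_index = offset >> 29 = 10 and l2_index = (offset >> 16) & 8191 = 0,
 * i.e. the first entry of the eleventh L2 table. */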

static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with a
         * pointer to its cluster; the second one has to do RMW (done above by
         * copy_sectors()), update the L2 table with its cluster pointer, and
         * free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */

static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
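
/* For illustration: if a running allocation covers guest clusters 10..12, a
 * new request starting in cluster 11 shrinks to zero bytes and yields on
 * dependent_requests (-EAGAIN after waking up), while a request starting in
 * cluster 8 is merely shortened so that it ends where cluster 10 begins. */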

static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
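
/* For illustration, with 64 KiB clusters: a request at guest offset 0x12345
 * (0x2345 bytes into its cluster) that finds a COPIED cluster at host offset
 * 0xa0000 returns *host_offset = 0xa2345, i.e. the same byte within the
 * cluster. */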

static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
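
/* For context: handle_copied() may have fixed *host_offset so that this
 * allocation extends an existing contiguous run, in which case
 * qcow2_alloc_clusters_at() may return fewer clusters than requested
 * (possibly zero); with *host_offset == 0 the allocator is unconstrained. */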

static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */

static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
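
    /* Worked example, assuming 64 KiB clusters: a 4 KiB write at guest offset
     * 0x1f000 allocates a single cluster (avail_sectors = 128). cow_start
     * covers the first 120 sectors (alloc_n_start), the write itself covers
     * the last 8 sectors, and cow_end is empty because the request ends
     * exactly at the cluster boundary. */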

    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */

int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = *num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
*bs
, uint64_t offset
,
1415 unsigned int nb_clusters
, enum qcow2_discard_type type
)
1417 BDRVQcowState
*s
= bs
->opaque
;
1423 ret
= get_cluster_table(bs
, offset
, &l2_table
, &l2_index
);
1428 /* Limit nb_clusters to one L2 table */
1429 nb_clusters
= MIN(nb_clusters
, s
->l2_size
- l2_index
);
1431 for (i
= 0; i
< nb_clusters
; i
++) {
1432 uint64_t old_l2_entry
;
1434 old_l2_entry
= be64_to_cpu(l2_table
[l2_index
+ i
]);
1437 * Make sure that a discarded area reads back as zeroes for v3 images
1438 * (we cannot do it for v2 without actually writing a zero-filled
1439 * buffer). We can skip the operation if the cluster is already marked
1440 * as zero, or if it's unallocated and we don't have a backing file.
1442 * TODO We might want to use bdrv_get_block_status(bs) here, but we're
1443 * holding s->lock, so that doesn't work today.
1445 switch (qcow2_get_cluster_type(old_l2_entry
)) {
1446 case QCOW2_CLUSTER_UNALLOCATED
:
1447 if (!bs
->backing_hd
) {
1452 case QCOW2_CLUSTER_ZERO
:
1455 case QCOW2_CLUSTER_NORMAL
:
1456 case QCOW2_CLUSTER_COMPRESSED
:
1463 /* First remove L2 entries */
1464 qcow2_cache_entry_mark_dirty(s
->l2_table_cache
, l2_table
);
1465 if (s
->qcow_version
>= 3) {
1466 l2_table
[l2_index
+ i
] = cpu_to_be64(QCOW_OFLAG_ZERO
);
1468 l2_table
[l2_index
+ i
] = cpu_to_be64(0);
1471 /* Then decrease the refcount */
1472 qcow2_free_any_clusters(bs
, old_l2_entry
, 1, type
);
1475 ret
= qcow2_cache_put(bs
, s
->l2_table_cache
, (void**) &l2_table
);

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
*bs
, uint64_t offset
,
1530 unsigned int nb_clusters
)
1532 BDRVQcowState
*s
= bs
->opaque
;
1538 ret
= get_cluster_table(bs
, offset
, &l2_table
, &l2_index
);
1543 /* Limit nb_clusters to one L2 table */
1544 nb_clusters
= MIN(nb_clusters
, s
->l2_size
- l2_index
);
1546 for (i
= 0; i
< nb_clusters
; i
++) {
1547 uint64_t old_offset
;
1549 old_offset
= be64_to_cpu(l2_table
[l2_index
+ i
]);
1551 /* Update L2 entries */
1552 qcow2_cache_entry_mark_dirty(s
->l2_table_cache
, l2_table
);
1553 if (old_offset
& QCOW_OFLAG_COMPRESSED
) {
1554 l2_table
[l2_index
+ i
] = cpu_to_be64(QCOW_OFLAG_ZERO
);
1555 qcow2_free_any_clusters(bs
, old_offset
, 1, QCOW2_DISCARD_REQUEST
);
1557 l2_table
[l2_index
+ i
] |= cpu_to_be64(QCOW_OFLAG_ZERO
);
1561 ret
= qcow2_cache_put(bs
, s
->l2_table_cache
, (void**) &l2_table
);

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * expanded_clusters is a bitmap where every bit corresponds to one cluster in
 * the image file; a bit gets set if the corresponding cluster has been used for
 * zero expansion (i.e., has been filled with zeroes and is referenced from an
 * L2 table). nb_clusters contains the total cluster count of the image file,
 * i.e., the number of bits in expanded_clusters.
 */
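
/* For illustration: cluster index n maps to byte n / 8, bit n % 8 of the
 * bitmap, so the caller sizes it as (nb_clusters + 7) / 8 bytes. */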

static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, uint8_t **expanded_clusters,
                                      uint64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            /* unallocated */
            continue;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type == QCOW2_CLUSTER_NORMAL) {
                cluster_index = offset >> s->cluster_bits;
                assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
                if ((*expanded_clusters)[cluster_index / 8] &
                    (1 << (cluster_index % 8))) {
                    /* Probably a shared L2 table; this cluster was a zero
                     * cluster which has been expanded, its refcount
                     * therefore most likely requires an update. */
                    ret = qcow2_update_cluster_refcount(bs, cluster_index, 1,
                                                        QCOW2_DISCARD_NEVER);
                    if (ret < 0) {
                        goto fail;
                    }
                    /* Since we just increased the refcount, the COPIED flag may
                     * no longer be set. */
                    l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED);
                    l2_dirty = true;
                }
                continue;
            }
            else if (qcow2_get_cluster_type(l2_entry) != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            l2_dirty = true;

            cluster_index = offset >> s->cluster_bits;

            if (cluster_index >= *nb_clusters) {
                uint64_t old_bitmap_size = (*nb_clusters + 7) / 8;
                uint64_t new_bitmap_size;
                /* The offset may lie beyond the old end of the underlying image
                 * file for growable files only */
                assert(bs->file->growable);
                *nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                                BDRV_SECTOR_SIZE);
                new_bitmap_size = (*nb_clusters + 7) / 8;
                *expanded_clusters = g_realloc(*expanded_clusters,
                                               new_bitmap_size);
                /* clear the newly allocated space */
                memset(&(*expanded_clusters)[old_bitmap_size], 0,
                       new_bitmap_size - old_bitmap_size);
            }

            assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
            (*expanded_clusters)[cluster_index / 8] |= 1 << (cluster_index % 8);
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            if (ret < 0) {
                l2_table = NULL;
                goto fail;
            }
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                        (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            if (ret < 0) {
                qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            } else {
                ret = qcow2_cache_put(bs, s->l2_table_cache,
                        (void **)&l2_table);
            }
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */

int qcow2_expand_zero_clusters(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    uint64_t nb_clusters;
    uint8_t *expanded_clusters;
    int ret;
    int i, j;

    nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                   BDRV_SECTOR_SIZE);
    expanded_clusters = g_try_malloc0((nb_clusters + 7) / 8);
    if (expanded_clusters == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &expanded_clusters, &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &expanded_clusters, &nb_clusters);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(expanded_clusters);
    g_free(l1_table);
    return ret;
}