/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"
#include "trace.h"
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table,
        new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data,
        sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}
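/*
 * For illustration: the growth rule above, new_l1_size = (new_l1_size * 3
 * + 1) / 2, grows the table by roughly 1.5x per iteration, e.g.
 * 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 -> 27 -> 41 -> ..., so a min_size of n
 * is reached in O(log n) grow operations. The 12-byte data buffer works
 * because, in the qcow2 header, the 4-byte big-endian l1_size field is
 * immediately followed by the 8-byte big-endian l1_table_offset, so a
 * single pwrite starting at offsetof(QCowHeader, l1_size) updates both
 * fields in one request.
 */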
/*
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success (with *l2_table pointing to the table), or
 * -errno if the read from the image file failed.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
        (void**) l2_table);

    return ret;
}
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
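/*
 * Worked example for the mask above: with L1_ENTRIES_PER_SECTOR == 64,
 * updating L1 entry 100 yields l1_start_index = 100 & ~63 = 64, so the
 * whole sector covering entries 64..127 is rewritten at byte offset
 * l1_table_offset + 8 * 64. Reading entries past s->l1_size is safe here
 * because the in-memory L1 table is allocated rounded up to a 512-byte
 * boundary (see qcow2_grow_l1_table() above).
 */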
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}
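/*
 * Note on the update ordering in l2_allocate(): the refcount block cache
 * is flushed before the new table can be linked, and the L1 entry is only
 * rewritten once the new L2 table itself has been flushed to disk. On any
 * failure the in-memory L1 entry is restored to old_l2_offset, so an
 * aborted allocation leaves the image referring exclusively to the old,
 * intact table.
 */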
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + (uint64_t) i * cluster_size !=
            (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}
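/*
 * Example of the two counting helpers, with hypothetical entry values
 * shown in host byte order and flags already stripped by ~mask:
 *
 *     l2_table[] = { 0x50000, 0x60000, 0x70000, 0, 0, 0x90000 }
 *
 * With cluster_size = 0x10000, count_contiguous_clusters(6, 0x10000,
 * l2_table, 0, 0) returns 3, because entries 0..2 advance by exactly one
 * cluster_size each. count_contiguous_free_clusters(3, &l2_table[3]) then
 * returns 2 for the two zero (unallocated) entries.
 */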
/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
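/*
 * For reference: each 512-byte sector above is encrypted independently
 * with AES-CBC, and its IV is simply the guest sector number, stored
 * little-endian in the low 8 bytes with the high 8 bytes zeroed. The
 * scheme is deterministic, so rewriting a sector reuses the same IV; enc
 * selects between encryption and decryption so that reads and writes can
 * share this code path.
 */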
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        iov.iov_base, iov.iov_base, n, 1,
                        &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Return 0, if the offset is found
 * Return -errno, otherwise.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!*cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    *cluster_offset &= ~QCOW_OFLAG_COPIED;
    return 0;
}
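/*
 * Worked example of the index arithmetic above, assuming the default
 * 64 KiB clusters (cluster_bits = 16, cluster_sectors = 128) and 8-byte
 * L2 entries (l2_bits = 13, hence l1_bits = 29): for guest offset
 * 0x40001234,
 *
 *     l1_index         = 0x40001234 >> 29          = 2
 *     l2_index         = (0x40001234 >> 16) & 8191 = 0
 *     index_in_cluster = (0x40001234 >> 9) & 127   = 9
 *
 * i.e. the request starts nine sectors into the first cluster described
 * by the third L1 entry.
 */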
/*
 * for a given disk offset, load (and allocate if needed) the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index in the l2
 * table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful, 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
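/*
 * Example of the descriptor built above: if qcow2_alloc_bytes() returns
 * host offset 69632 (= sector 136) for compressed_size = 3000, then
 * nb_csectors = ((69632 + 2999) >> 9) - (69632 >> 9) = 141 - 136 = 5, and
 * the L2 entry combines the byte offset with QCOW_OFLAG_COMPRESSED and
 * (5 << csize_shift). qcow2_decompress_cluster() below adds 1 back,
 * reading the 6 sectors that the 3000 bytes actually straddle.
 */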
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;
    bool cow = false;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (done above
         * by copy_sectors()), update the l2 table with its own cluster
         * pointer, and free the old cluster. This is what this loop does */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    for (i = 0; i < j; i++) {
        qcow2_free_any_clusters(bs,
            be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
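/*
 * Ordering note for the L2 update above: qcow2_cache_set_dependency()
 * forces the refcount block cache to be flushed before the L2 table
 * cache, and the COW path additionally makes the L2 flush depend on the
 * copied data having reached the image file. After a crash this can at
 * worst leak clusters (refcount raised, L2 entry not yet linked); it
 * should never publish an L2 entry pointing at unwritten or unaccounted
 * data.
 */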
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i = 0;
    uint64_t cluster_offset;

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED)) {
            break;
        }
    }

    assert(i <= nb_clusters);
    return i;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters, uint64_t *l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster_offset;
    QCowL2Meta *old_alloc;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset >> s->cluster_bits;
        uint64_t end = start + *nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                *nb_clusters = old_start - start;
            } else {
                *nb_clusters = 0;
            }

            if (*nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    if (!*nb_clusters) {
        abort();
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        cluster_offset = qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
    } else {
        cluster_offset = *host_offset;
        *nb_clusters = qcow2_alloc_clusters_at(bs, cluster_offset, *nb_clusters);
    }

    if (cluster_offset < 0) {
        return cluster_offset;
    }
    *host_offset = cluster_offset;
    return 0;
}
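/*
 * Example of the truncation above: a request for clusters 10..13
 * (start = 10, *nb_clusters = 4) that overlaps a running allocation with
 * old_start = 12 is cut down to *nb_clusters = 2; clusters 10..11 are
 * allocated now and the caller comes back for the rest once the in-flight
 * request has finished. If instead the request begins inside the old
 * range, *nb_clusters drops to 0 and the coroutine blocks on
 * dependent_requests, returning -EAGAIN so the lookup is redone.
 */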
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret, sectors;
    uint64_t *l2_table;
    unsigned int nb_clusters, keep_clusters;
    uint64_t cluster_offset;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

again:
    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
                      s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /*
     * Check how many clusters are already allocated and don't need COW, and
     * how many need a new allocation.
     */
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                                  &l2_table[l2_index], 0, 0);
        assert(keep_clusters <= nb_clusters);
        nb_clusters -= keep_clusters;
    } else {
        /* For the moment, overwrite compressed clusters one by one */
        if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            nb_clusters = 1;
        } else {
            nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
        }

        keep_clusters = 0;
        cluster_offset = 0;
    }

    cluster_offset &= ~QCOW_OFLAG_COPIED;

    /* If there is something left to allocate, do that now */
    *m = (QCowL2Meta) {
        .cluster_offset = cluster_offset,
        .nb_clusters    = 0,
    };
    qemu_co_queue_init(&m->dependent_requests);

    if (nb_clusters > 0) {
        uint64_t alloc_offset;
        uint64_t alloc_cluster_offset;
        uint64_t keep_bytes = keep_clusters * s->cluster_size;

        /* Calculate start and size of allocation */
        alloc_offset = offset + keep_bytes;

        if (keep_clusters == 0) {
            alloc_cluster_offset = 0;
        } else {
            alloc_cluster_offset = cluster_offset + keep_bytes;
        }

        /* Allocate, if necessary at a given offset in the image file */
        ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
                                      &nb_clusters, l2_table);
        if (ret == -EAGAIN) {
            goto again;
        } else if (ret < 0) {
            goto fail;
        }

        /* save info needed for meta data update */
        if (nb_clusters > 0) {
            int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
            int avail_sectors = (keep_clusters + nb_clusters)
                                << (s->cluster_bits - BDRV_SECTOR_BITS);

            *m = (QCowL2Meta) {
                .cluster_offset = keep_clusters == 0 ?
                                  alloc_cluster_offset : cluster_offset,
                .alloc_offset   = alloc_cluster_offset,
                .offset         = alloc_offset,
                .n_start        = keep_clusters == 0 ? n_start : 0,
                .nb_clusters    = nb_clusters,
                .nb_available   = MIN(requested_sectors, avail_sectors),
            };
            qemu_co_queue_init(&m->dependent_requests);
            QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
        }
    }

    /* Some cleanup work */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto fail_put;
    }

    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
    if (sectors > n_end) {
        sectors = n_end;
    }

    assert(sectors > n_start);
    *num = sectors - n_start;

    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
fail_put:
    if (nb_clusters > 0) {
        QLIST_REMOVE(m, next_in_flight);
    }
    return ret;
}
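/*
 * To see how the pieces fit together, consider a write spanning five
 * clusters whose first two entries already carry QCOW_OFLAG_COPIED:
 * keep_clusters becomes 2 and nb_clusters 3, and do_alloc_cluster_offset()
 * is asked for three clusters at exactly cluster_offset + 2 * cluster_size,
 * so that guest-contiguous data stays host-contiguous when the image file
 * has room there. m then describes only the three new clusters, whose COW
 * and L2 linking are finished later by qcow2_alloc_cluster_link_l2().
 */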
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
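/*
 * The negative windowBits value passed to inflateInit2() selects zlib's
 * raw deflate mode: compressed qcow2 clusters are stored without a zlib
 * header or checksum, so a plain inflateInit() would reject them.
 * Z_BUF_ERROR is tolerated above because the input is read at sector
 * granularity and may contain trailing garbage after the deflate stream;
 * the result only counts as valid if exactly out_buf_size bytes came out.
 */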
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        old_offset &= ~QCOW_OFLAG_COPIED;

        if (old_offset == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}
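/*
 * Example of the rounding above, assuming 64 KiB clusters: a discard of
 * 200 KiB starting at guest offset 100 KiB covers bytes 100 KiB..300 KiB,
 * which shrinks to the cluster-aligned range 128 KiB..256 KiB. Only the
 * two fully covered clusters are dropped; the partial head and tail are
 * kept, since discarding them would throw away valid guest data.
 */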