block/qcow2-cluster.c

/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"
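
/*
 * Grows the L1 table to at least min_size entries (exactly min_size if
 * exact_size is true): allocates clusters for the enlarged table, writes the
 * table there, updates the image header to point at it and only then frees
 * the old table. Returns 0 on success, -errno on failure.
 */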
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed;
 * on success, *l2_table points to the L2 table.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                           (void **)l2_table);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * table have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_table,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_table[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}
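
/*
 * Reads the existing guest data for a COW region (qiov->size bytes starting
 * at src_cluster_offset + offset_in_cluster) into qiov.
 * Returns 0 on success, -errno on failure.
 */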
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
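
/*
 * Encrypts the bytes-sized COW buffer in place if the image is encrypted;
 * this is a no-op for unencrypted images or empty regions. Returns false if
 * encryption fails, true otherwise.
 */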
static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t sector = (src_cluster_offset + offset_in_cluster)
                         >> BDRV_SECTOR_BITS;
        assert(s->cipher);
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        if (qcow2_encrypt_sectors(s, sector, buffer, buffer,
                                  bytes >> BDRV_SECTOR_BITS, true, NULL) < 0) {
            return false;
        }
    }
    return true;
}
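
/*
 * Writes a COW region buffer (described by qiov) to the image file at
 * cluster_offset + offset_in_cluster, after checking that the target area
 * doesn't overlap any metadata. Returns 0 on success, -errno on failure.
 */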
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l1 entry */
    bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
                    + offset_in_cluster;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(*cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(nb_clusters,
                                                  &l2_table[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed) the L2 table.
 *
 * The L2 table offset in the qcow2 file and the cluster index in the L2
 * table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful; return 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}
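
/*
 * Performs the COW for one allocation: reads the existing data for the
 * cow_start and cow_end regions of m (in a single request if the gap between
 * them is at most 16 KB), encrypts it if necessary and writes it to the
 * newly allocated cluster. If m->data_qiov is set, the guest data is written
 * out in the same request as the COW regions.
 */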
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, start->offset,
                                    start_buffer, start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, end->offset,
                                    end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
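
/*
 * Links the clusters described by m into the L2 table: performs the COW for
 * the unmodified head and tail of the request, points the L2 entries at the
 * newly allocated clusters and frees any clusters that were replaced by this
 * write (unless m->keep_old_clusters is set).
 * Returns 0 on success, -errno on failure.
 */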
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with a
         * pointer to its cluster; the second one has to do RMW (which is done
         * above by perform_cow()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = 0;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        int preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}
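
/*
 * Inflates a raw deflate stream (buf, buf_size) into out_buf. Returns 0 if
 * exactly out_buf_size bytes could be decompressed, -1 on any zlib error or
 * output size mismatch.
 */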
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
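
/*
 * Reads the compressed cluster described by the L2 entry cluster_offset and
 * decompresses it into s->cluster_cache. The result is cached by compressed
 * cluster offset, so repeated reads from the same compressed cluster only
 * decompress once. Returns 0 on success, -errno on failure.
 */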
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters, enum qcow2_discard_type type,
                             bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
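
/*
 * Discards the cluster range [offset, offset + bytes), walking the range one
 * L2 table at a time via discard_single_l2(). Queued metadata discards are
 * processed before returning.
 */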
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_single_l2(bs, offset, nb_clusters, type,
                                    full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
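
/*
 * Marks the cluster range [offset, offset + bytes) as reading back as zeroes,
 * one L2 table per loop iteration via zero_single_l2(). Only supported for
 * v3 or newer images (returns -ENOTSUP otherwise).
 */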
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_single_l2(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                                  (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                            (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

            if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                continue;
            }

            if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                if (!bs->backing) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1,
                                        "Cluster allocation offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
                                      sizeof(uint64_t), BDRV_SECTOR_SIZE);

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}