/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

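/*
 * A minimal sketch (kept out of the build) of the growth rule used above:
 * bumping by roughly 1.5x means an empty table grows through the sizes
 * 1, 2, 3, 5, 8, 12, 18, 27, ... so repeated small extensions of the
 * virtual disk don't rewrite the L1 table every time.
 */
#if 0
static int64_t sketch_grown_l1_size(int64_t cur_size, uint64_t min_size)
{
    int64_t new_size = cur_size ? cur_size : 1;

    while (min_size > new_size) {
        new_size = (new_size * 3 + 1) / 2; /* same 1.5x rule as above */
    }
    return new_size;
}
#endif
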
/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the L2 table couldn't be read from the
 * image file.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

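/*
 * Worked example (an observation, not extra behaviour): with 8-byte entries,
 * L1_ENTRIES_PER_SECTOR is 64, so updating L1 entry 70 rewrites the sector
 * that starts at entry 70 & ~63 = 64, i.e. entries 64..127, at byte offset
 * l1_table_offset + 8 * 64 in the image file.
 */
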
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset)
        return 0;

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

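/*
 * Worked example (an observation, not extra behaviour), assuming 64 KiB
 * clusters: three L2 entries pointing at 0x50000, 0x60000 and 0x80000 give a
 * run of 2, because 0x50000 + 2 * 0x10000 != 0x80000. A flag included in the
 * mask (e.g. QCOW_OFLAG_ZERO) that differs from the first entry breaks the
 * run in exactly the same way, since it changes the masked value.
 */
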
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

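/*
 * Note on the IV scheme above (an observation, not extra behaviour): each
 * 512-byte sector is encrypted independently with AES-CBC, and the IV is the
 * little-endian sector number padded with zeroes. Sector 5 therefore uses the
 * IV 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00, matching the Linux
 * cryptoloop scheme referenced in the comment.
 */
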
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                iov.iov_base, iov.iov_base, n, 1,
                &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;
}

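/*
 * Worked example (an observation, not extra behaviour), assuming the common
 * 64 KiB clusters (cluster_bits = 16) and l2_bits = 13, so l1_bits = 29:
 *
 *   guest offset 0x2345678000
 *   l1_index         = offset >> 29                  = 0x11a
 *   l2_index         = (offset >> 16) & (0x2000 - 1) = 0x567
 *   index_in_cluster = (offset >> 9) & (0x80 - 1)    = 0x40
 *
 * The host cluster offset is then the selected L2 entry masked with
 * L2E_OFFSET_MASK, plus the byte offset within the cluster.
 */
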
/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * The l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * return 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

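/*
 * Worked example (an observation, not extra behaviour): a 1000-byte
 * compressed cluster placed at host offset 0x503f0 starts in sector 0x281
 * and ends in sector 0x283, so nb_csectors = 0x283 - 0x281 = 2 is stored in
 * the L2 entry; qcow2_decompress_cluster() later adds 1 back and reads
 * 3 sectors.
 */
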
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request; previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

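/*
 * Worked example (an observation, not extra behaviour): a request for
 * [100 KiB, 200 KiB) against an in-flight allocation covering
 * [160 KiB, 260 KiB) starts before old_start, so it is shortened to
 * bytes = 60 KiB and the caller deals with the tail in a later iteration.
 * Had the request started at 180 KiB, bytes would drop to 0 and the
 * coroutine would sleep on dependent_requests, returning -EAGAIN once woken.
 */
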
/*
 * Checks how many already allocated clusters there are at the given
 * guest_offset (up to *bytes) that don't require a copy on write. If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                     keep_clusters * s->cluster_size
                     - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

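/*
 * Worked example (an observation, not extra behaviour), 64 KiB clusters: for
 * a request of 200 KiB at guest offset 2 KiB where only the first two
 * clusters carry QCOW_OFLAG_COPIED, keep_clusters = 2 and *bytes shrinks to
 * 2 * 64 KiB - 2 KiB = 126 KiB; *host_offset gets the first cluster's host
 * offset plus the same 2 KiB offset into the cluster.
 */
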
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that is either still unallocated or
 * needs a copy on write. If *host_offset is non-zero, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                 - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

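/*
 * Worked example (an observation, not extra behaviour), 64 KiB clusters: a
 * 4 KiB write at guest offset 130 KiB that allocates one new cluster has
 * offset_into_cluster = 2 KiB, so
 *
 *   requested_sectors = (4 KiB + 2 KiB) >> 9 = 12
 *   avail_sectors     = 1 << (16 - 9)        = 128
 *   alloc_n_start     = 2 KiB >> 9           = 4
 *   nb_sectors        = MIN(12, 128)         = 12
 *
 * giving a cow_start region of sectors [0, 4) and a cow_end region of
 * sectors [12, 128) within the newly allocated cluster.
 */
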
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = *num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

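/*
 * Illustrative walk-through (an observation, not extra behaviour): for a
 * 256 KiB request whose first 64 KiB cluster is already COPIED, the first
 * iteration consumes that 64 KiB via handle_copied() (step 2) and continues;
 * a following iteration allocates the remaining 192 KiB via handle_alloc()
 * (step 3), provided the new clusters can be placed contiguously after the
 * first one, and queues one QCowL2Meta on *m; remaining then reaches 0 and
 * the loop ends.
 */
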
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

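/*
 * Note on the zlib usage above (an observation, not extra behaviour): the
 * negative window size passed to inflateInit2() selects raw deflate data
 * without a zlib header, which is how qcow2 stores compressed clusters.
 * Z_BUF_ERROR is tolerated because the input buffer covers whole sectors and
 * may contain trailing bytes beyond the end of the compressed stream; the
 * out_len check still requires that a full cluster was produced.
 */
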
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

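/*
 * Worked example (an observation, not extra behaviour), continuing the
 * allocation example further above: for a compressed cluster at
 * coffset = 0x503f0 with a stored sector count of 2, nb_csectors = 3,
 * sector_offset = 0x503f0 & 511 = 0x1f0, and csize = 3 * 512 - 0x1f0 = 1040
 * bytes are handed to decompress_buffer().
 */
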
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * Make sure that a discarded area reads back as zeroes for v3 images
         * (we cannot do it for v2 without actually writing a zero-filled
         * buffer). We can skip the operation if the cluster is already marked
         * as zero, or if it's unallocated and we don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (!bs->backing_hd) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            continue;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

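/*
 * Note (an observation, not extra behaviour): on v3 images a discarded
 * cluster becomes an explicit zero cluster (QCOW_OFLAG_ZERO), so it still
 * reads back as zeroes; on v2 images the entry simply becomes unallocated,
 * which means reads fall through to the backing file if one exists.
 */
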
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * expanded_clusters is a bitmap where every bit corresponds to one cluster in
 * the image file; a bit gets set if the corresponding cluster has been used for
 * zero expansion (i.e., has been filled with zeroes and is referenced from an
 * L2 table). nb_clusters contains the total cluster count of the image file,
 * i.e., the number of bits in expanded_clusters.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, uint8_t **expanded_clusters,
                                      uint64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_blockalign(bs, s->cluster_size);
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            /* unallocated */
            continue;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type == QCOW2_CLUSTER_NORMAL) {
                cluster_index = offset >> s->cluster_bits;
                assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
                if ((*expanded_clusters)[cluster_index / 8] &
                    (1 << (cluster_index % 8))) {
                    /* Probably a shared L2 table; this cluster was a zero
                     * cluster which has been expanded, its refcount
                     * therefore most likely requires an update. */
                    ret = qcow2_update_cluster_refcount(bs, cluster_index, 1,
                                                        QCOW2_DISCARD_NEVER);
                    if (ret < 0) {
                        goto fail;
                    }
                    /* Since we just increased the refcount, the COPIED flag may
                     * no longer be set. */
                    l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED);
                    l2_dirty = true;
                }
                continue;
            }
            else if (qcow2_get_cluster_type(l2_entry) != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            l2_dirty = true;

            cluster_index = offset >> s->cluster_bits;

            if (cluster_index >= *nb_clusters) {
                uint64_t old_bitmap_size = (*nb_clusters + 7) / 8;
                uint64_t new_bitmap_size;
                /* The offset may lie beyond the old end of the underlying image
                 * file for growable files only */
                assert(bs->file->growable);
                *nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                                BDRV_SECTOR_SIZE);
                new_bitmap_size = (*nb_clusters + 7) / 8;
                *expanded_clusters = g_realloc(*expanded_clusters,
                                               new_bitmap_size);
                /* clear the newly allocated space */
                memset(&(*expanded_clusters)[old_bitmap_size], 0,
                       new_bitmap_size - old_bitmap_size);
            }

            assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
            (*expanded_clusters)[cluster_index / 8] |= 1 << (cluster_index % 8);
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            if (ret < 0) {
                l2_table = NULL;
                goto fail;
            }
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                        (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            if (ret < 0) {
                qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            } else {
                ret = qcow2_cache_put(bs, s->l2_table_cache,
                                      (void **)&l2_table);
            }
        }
    }
    return ret;
}

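/*
 * Minimal sketch (kept out of the build) of the bitmap indexing used by
 * expand_zero_clusters_in_l1(): one bit per host cluster, so cluster_index 21
 * lives in byte 21 / 8 = 2 at bit 21 % 8 = 5.
 */
#if 0
static bool sketch_bitmap_test_and_set(uint8_t *bitmap, uint64_t cluster_index)
{
    bool was_set = bitmap[cluster_index / 8] & (1 << (cluster_index % 8));

    bitmap[cluster_index / 8] |= 1 << (cluster_index % 8);
    return was_set;
}
#endif
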
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    uint64_t nb_clusters;
    uint8_t *expanded_clusters;
    int ret;
    int i, j;

    nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                   BDRV_SECTOR_SIZE);
    expanded_clusters = g_malloc0((nb_clusters + 7) / 8);

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &expanded_clusters, &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &expanded_clusters, &nb_clusters);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(expanded_clusters);
    g_free(l1_table);
    return ret;
}