/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }
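
    /* Growing by a factor of 3/2 keeps the number of grow operations
     * logarithmic in the final size, e.g. 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> ...
     * entries. */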
    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));
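
    /* offset_to_l2_index() indexes into the full L2 table while
     * offset_to_l2_slice_index() indexes into a single slice, so their
     * difference is the index of the first entry of the slice containing
     * @offset; multiplied by the entry size this is the slice's byte
     * offset within the L2 table. */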

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
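    /* l1_index is rounded down to a multiple of 64 (the number of 8-byte
     * entries per 512-byte sector), so the write below covers exactly the
     * sector that contains the updated entry. */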
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require different handling)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
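    /* The mask covers the host offset bits plus every flag that must stay
     * constant across the run; QCOW_OFLAG_COMPRESSED is always included so
     * that the count stops at the first compressed cluster. */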
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t offset = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster));
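        /* crypt_physical_offset selects which offset seeds the IVs: the
         * host (physical) offset of the newly allocated cluster, or the
         * guest virtual offset as used by the older built-in AES mode. */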
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;
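    /* E.g. with 64 KB clusters and 8192-entry slices, one slice covers
     * 512 MB of guest data, so bytes_available ranges from one cluster
     * (64 KB) up to 512 MB when offset points at the slice's first
     * cluster. */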

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(*cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }
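
    /* QCOW_OFLAG_COPIED in an L1 entry means that the L2 table belongs to
     * this image alone (refcount 1) and may be updated in place; without it
     * the table may be shared, e.g. with a snapshot, and must be copied
     * before being modified. */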
    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * return 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);
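
    /* The compressed cluster descriptor packs the host byte offset into the
     * low bits and the number of additional 512-byte sectors occupied by
     * the compressed data into the bits starting at s->csize_shift. */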
    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
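    /* 16384 is a heuristic threshold: reading the (up to 16 KB) gap between
     * the two COW regions wastes some bandwidth, but saves a second I/O
     * request. */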
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the l2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
                        QCOW2_DISCARD_NEVER);
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = 0;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_slice, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
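    /* Worked example with 64 KB clusters: for guest_offset = 0x11000 and
     * *bytes = 0x3000, offset_into_cluster() is 0x1000, so requested_bytes =
     * 0x4000; with one newly allocated cluster avail_bytes = 0x10000 and
     * nb_bytes = 0x4000, i.e. the COW tail below covers
     * 0x10000 - 0x4000 = 0xC000 bytes. */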
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
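            /* ORing in QCOW_OFLAG_ZERO keeps the host cluster allocated
             * (QCOW2_CLUSTER_ZERO_ALLOC), so a later write can reuse the
             * existing allocation instead of allocating a new cluster. */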
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, sizeof(uint64_t),
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}