/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

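/*
 * qcow2_grow_l1_table
 *
 * Grows the L1 table to at least min_size entries: a larger, sector-aligned
 * copy of the table is written to newly allocated clusters, the image header
 * is updated to point at it, and the old table is freed.
 */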
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    uint64_t new_l1_table_offset;
    uint8_t data[12];

    new_l1_size = s->l1_size;
    if (min_size <= new_l1_size)
        return 0;
    if (new_l1_size == 0) {
        new_l1_size = 1;
    }
    while (min_size > new_l1_size) {
        new_l1_size = (new_l1_size * 3 + 1) / 2;
    }
#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);

    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret != new_l1_size2)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,
                    sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    qemu_free(s->l1_table);
    return -EIO;
}

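/*
 * qcow2_l2_cache_reset
 *
 * Invalidates the in-memory L2 table cache by clearing the cached tables,
 * their offsets and their usage counters.
 */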
void qcow2_l2_cache_reset(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
}

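/*
 * l2_cache_new_entry
 *
 * Returns the index of the L2 cache slot with the lowest usage counter,
 * i.e. the least used entry, which the caller may overwrite.
 */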
static inline int l2_cache_new_entry(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint32_t min_count;
    int min_index, i;

    /* find a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    return min_index;
}

/*
 * seek_l2_table
 *
 * Looks up l2_offset in the l2_cache table.
 * If it is not found, return NULL.
 * If it is found, increment the l2 cache hit count of the entry;
 * if the counter overflows, halve all counters.
 * Return the pointer to the l2 cache entry.
 */

static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
{
    int i, j;

    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            return s->l2_cache + (i << s->l2_bits);
        }
    }
    return NULL;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns a pointer to the L2 table on success, or NULL if the read from
 * the image file failed.
 */

static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t *l2_table;

    /* seek if the table for the given offset is in the cache */

    l2_table = seek_l2_table(s, l2_offset);
    if (l2_table != NULL)
        return l2_table;

    /* not found: load a new entry in the least used one */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);
    if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BDRVQcowState *s, int l1_index)
{
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    if (bdrv_pwrite(s->hd, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf)) != sizeof(buf))
    {
        return -1;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */

static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t old_l2_offset;
    uint64_t *l2_table, l2_offset;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));

    /* update the L1 entry */

    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    if (write_l1_entry(s, l1_index) < 0) {
        return NULL;
    }

    /* allocate a new entry in the l2 cache */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        /* if there was an old l2 table, read it from the disk */
        if (bdrv_pread(s->hd, old_l2_offset,
                       l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return NULL;
    }
    /* write the l2 table to the file */
    if (bdrv_pwrite(s->hd, l2_offset,
                    l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;

    /* update the l2 cache entry */

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}

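/*
 * count_contiguous_clusters
 *
 * Counts how many entries in l2_table, starting at index start, map to
 * clusters that are physically contiguous with the cluster referenced by
 * l2_table[0] (ignoring the bits in mask). Returns 0 if l2_table[0] is
 * unallocated.
 */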
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + (uint64_t) i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

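/*
 * count_contiguous_free_clusters
 *
 * Counts how many consecutive entries at the start of l2_table are zero,
 * i.e. how many clusters are unallocated, up to nb_clusters.
 */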
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

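/*
 * qcow_read
 *
 * Reads nb_sectors starting at sector_num into buf, handling unallocated
 * clusters (backing file or zeros), compressed clusters and encryption.
 * Returns 0 on success, -1 on error.
 */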
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        n = nb_sectors;
        cluster_offset = qcow2_get_cluster_offset(bs, sector_num << 9, &n);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, sector_num, buf, n);
                if (n1 > 0) {
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (qcow2_decompress_cluster(s, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                      &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}

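/*
 * copy_sectors
 *
 * Copies the guest sectors [n_start, n_end) relative to start_sect into the
 * same position of the cluster at cluster_offset, re-encrypting them if
 * necessary. Used to carry unmodified data over to a newly allocated cluster.
 */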
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              s->cluster_data,
                              s->cluster_data, n, 1,
                              &s->aes_encrypt_key);
    }
    ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
                     s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * on entry, *num is the number of contiguous clusters we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous clusters we can read.
 *
 * Return the cluster offset, if the offset is found.
 * Return 0, otherwise.
 */

uint64_t qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    l2_table = l2_load(bs, l2_offset);
    if (l2_table == NULL)
        return 0;

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return cluster_offset & ~QCOW_OFLAG_COPIED;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 1 on success, 0 on failure.
 */

static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1);
        if (ret < 0)
            return 0;
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        l2_table = l2_load(bs, l2_offset);
        if (l2_table == NULL)
            return 0;
    } else {
        if (l2_offset)
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        l2_table = l2_allocate(bs, l1_index);
        if (l2_table == NULL)
            return 0;
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 1;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED)
        return cluster_offset & ~QCOW_OFLAG_COPIED;

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    if (bdrv_pwrite(s->hd,
                    l2_offset + l2_index * sizeof(uint64_t),
                    l2_table + l2_index,
                    sizeof(uint64_t)) != sizeof(uint64_t))
        return 0;

    return cluster_offset;
}

/*
 * Write L2 table updates to disk, writing whole sectors to avoid a
 * read-modify-write in bdrv_pwrite
 */
#define L2_ENTRIES_PER_SECTOR (512 / 8)
static int write_l2_entries(BDRVQcowState *s, uint64_t *l2_table,
    uint64_t l2_offset, int l2_index, int num)
{
    int l2_start_index = l2_index & ~(L2_ENTRIES_PER_SECTOR - 1);
    int start_offset = (8 * l2_index) & ~511;
    int end_offset = (8 * (l2_index + num) + 511) & ~511;
    size_t len = end_offset - start_offset;

    if (bdrv_pwrite(s->hd, l2_offset + start_offset, &l2_table[l2_start_index],
        len) != len)
    {
        return -1;
    }

    return 0;
}

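/*
 * qcow2_alloc_cluster_link_l2
 *
 * Links the clusters described by m (previously reserved by
 * alloc_cluster_offset) into the L2 table: copies the unmodified head and
 * tail sectors of the affected clusters, updates the L2 entries and frees
 * any clusters that the new allocation replaces.
 */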
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
    QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    ret = -EIO;
    /* update L2 table */
    if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
        goto err;

    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates a separate cluster and writes data concurrently.
         * The first one to complete updates the l2 table with a pointer to its
         * cluster; the second one has to do RMW (which is done above by
         * copy_sectors()), update the l2 table with its cluster pointer and
         * free the old cluster. This is what this loop does */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    if (write_l2_entries(s, l2_table, l2_offset, l2_index, m->nb_clusters) < 0) {
        ret = -1;
        goto err;
    }

    for (i = 0; i < j; i++)
        qcow2_free_any_clusters(bs,
            be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 */

uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
                                    uint64_t offset,
                                    int n_start, int n_end,
                                    int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    unsigned int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);

        if(be64_to_cpu(l2_table[l2_index + i]))
            break;

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t end_offset = offset + nb_clusters * s->cluster_size;
        uint64_t old_offset = old_alloc->offset;
        uint64_t old_end_offset = old_alloc->offset +
            old_alloc->nb_clusters * s->cluster_size;

        if (end_offset < old_offset || offset > old_end_offset) {
            /* No intersection */
        } else {
            if (offset < old_offset) {
                /* Stop at the start of a running allocation */
                nb_clusters = (old_offset - offset) >> s->cluster_bits;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Set dependency and wait for a callback */
                m->depends_on = old_alloc;
                m->nb_clusters = 0;
                *num = 0;
                return 0;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);

    *num = m->nb_available - n_start;

    return cluster_offset;
}

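/*
 * decompress_buffer
 *
 * Inflates the raw deflate stream in buf into out_buf. Returns 0 on
 * success, -1 if decompression fails or does not produce exactly
 * out_buf_size bytes.
 */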
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

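/*
 * qcow2_decompress_cluster
 *
 * Reads and decompresses the compressed cluster described by cluster_offset
 * into s->cluster_cache, unless that cluster is already cached. Returns 0
 * on success, -1 on error.
 */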
int qcow2_decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}