/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/memalign.h"
#include "dmg.h"

BdrvDmgUncompressFunc *dmg_uncompress_bz2;
BdrvDmgUncompressFunc *dmg_uncompress_lzfse;

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

enum {
    /* DMG Block Type */
    UDZE = 0,               /* Zeroes */
    UDRW,                   /* RAW type */
    UDIG,                   /* Ignore */
    UDCO = 0x80000004,
    UDZO,
    UDBZ,
    ULFO,
    UDCM = 0x7ffffffe,      /* Comments */
    UDLE = 0xffffffff       /* Last Entry */
};

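/*
 * Probing relies on the ".dmg" filename extension only; the identifying UDIF
 * ("koly") trailer sits at the end of the image, so the header buffer that is
 * passed in is not inspected.
 */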
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

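/* Helpers that read big-endian integers from the underlying image file. */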
static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 8, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 4, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

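/* Helpers that read big-endian integers from an in-memory (mish block) buffer. */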
static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDZE: /* zero */
    case UDIG: /* ignore */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

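/*
 * Locate the 512-byte UDIF trailer by scanning the end of the file for the
 * "koly" magic. Returns the trailer offset, or a negative errno on failure.
 */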
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, length, buffer, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDZE: /* zeros */
    case UDRW: /* uncompressed */
    case UDIG: /* ignore */
    case UDZO: /* zlib */
        return true;
    case UDBZ: /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO: /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}

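/*
 * Parse one "mish" block: a 204-byte header followed by 40-byte chunk
 * entries. Each valid entry is appended to the per-chunk arrays in
 * BDRVDMGState; unknown or purely informational entries are skipped.
 */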
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            switch (s->types[i]) {
            case UDBZ:
                warn_report_once("dmg-bzip2 module is missing, accessing bzip2 "
                                 "compressed blocks will result in I/O errors");
                break;
            case ULFO:
                warn_report_once("dmg-lzfse module is missing, accessing lzfse "
                                 "compressed blocks will result in I/O errors");
                break;
            case UDCM:
            case UDLE:
                /* Comments and last entry can be ignored without problems */
                break;
            default:
                warn_report_once("Image contains chunks of unknown type %x, "
                                 "accessing them will result in I/O errors",
                                 s->types[i]);
                break;
            }
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type UDZE and UDIG) does not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

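/*
 * Walk the binary resource fork: locate the resource data area and feed each
 * contained resource to dmg_read_mish_block().
 */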
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, count, buffer, 0);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

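/*
 * Parse the XML property list variant: every <data>...</data> element holds a
 * base64-encoded mish block, which is decoded and passed to
 * dmg_read_mish_block().
 */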
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0);
    if (ret < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

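/*
 * Open a DMG image: locate the UDIF trailer, build the chunk table from
 * either the resource fork or the XML plist, and allocate the scratch buffers
 * used for compressed and uncompressed chunk I/O.
 */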
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    /*
     * NB: if uncompress submodules are absent,
     * ie block_module_load return value == 0, the function pointers
     * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL.
     */
    if (block_module_load("dmg-bz2", errp) < 0) {
        return -EINVAL;
    }
    if (block_module_load("dmg-lzfse", errp) < 0) {
        return -EINVAL;
    }

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

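/* Returns non-zero if sector_num falls inside chunk chunk_num, 0 otherwise. */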
static inline int is_sector_in_chunk(BDRVDMGState *s,
                uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
            s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 <= chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            if (chunk3 == 0) {
                goto err;
            }
            chunk2 = chunk3 - 1;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3 + 1;
        }
    }
err:
    return s->n_chunks; /* error */
}

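/*
 * Make sure the chunk containing sector_num is loaded: locate it, read it
 * from the data fork and, if compressed, decompress it into
 * s->uncompressed_chunk. On success s->current_chunk refers to that chunk.
 */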
static int coroutine_fn GRAPH_RDLOCK
dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO:
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                           (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->uncompressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

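/*
 * Read request entry point: serve the request sector by sector from the
 * currently decompressed chunk, and synthesize zeroes for UDZE/UDIG chunks
 * without touching the scratch buffer.
 */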
static int coroutine_fn GRAPH_RDLOCK
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

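/*
 * Driver registration. dmg is exposed as a read-only format driver; as a
 * rough usage sketch (not part of this file), an image could be converted to
 * raw with e.g.:
 *
 *     qemu-img convert -f dmg input.dmg -O raw output.img
 */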
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
    .is_format      = true,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);