/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/memalign.h"
#include "dmg.h"
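
/*
 * Decompression hooks for the optional "dmg-bz2" and "dmg-lzfse" block
 * modules.  dmg_open() below tries to load both modules; if a module is
 * absent the corresponding pointer stays NULL and chunks of that type are
 * reported as unknown (see dmg_is_known_block_type).
 */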
int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                          char *next_out, unsigned int avail_out);

int (*dmg_uncompress_lzfse)(char *next_in, unsigned int avail_in,
                            char *next_out, unsigned int avail_out);

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

enum {
    /* DMG Block Type */
    UDZE = 0,               /* Zeroes */
    UDRW,                   /* RAW type */
    UDIG,                   /* Ignore */
    UDCO = 0x80000004,
    UDZO,
    UDBZ,
    ULFO,
    UDCM = 0x7ffffffe,      /* Comments */
    UDLE = 0xffffffff       /* Last Entry */
};
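
/*
 * Summary of how this driver handles the chunk types above (see
 * dmg_is_known_block_type and dmg_read_chunk below): UDZE/UDIG are
 * synthesised as zeroes, UDRW is copied verbatim, UDZO is inflated with
 * zlib, and UDBZ/ULFO are handled only when the dmg-bz2/dmg-lzfse modules
 * are loaded.  UDCM and UDLE carry no data and are skipped; any other type
 * (including UDCO) is reported as unknown and yields I/O errors when read.
 */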

static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 8, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 4, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDZE: /* zero */
    case UDIG: /* ignore */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, length, buffer, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDZE:    /* zeros */
    case UDRW:    /* uncompressed */
    case UDIG:    /* ignore */
    case UDZO:    /* zlib */
        return true;
    case UDBZ:    /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO:    /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}
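
/*
 * Layout of a "mish" block as consumed below (offsets in bytes, all fields
 * big-endian): 0x00 magic 'mish', 0x08 first output sector of this block,
 * 0x18 offset of the (compressed) data within the data fork, 0xcc (204)
 * start of the chunk entries.  Each 40-byte chunk entry holds: 0x00 type,
 * 0x08 starting sector, 0x10 sector count, 0x18 offset into the data fork,
 * 0x20 compressed length.  This is a summary of the reads performed by
 * dmg_read_mish_block, not a complete description of the on-disk format.
 */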

static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            switch (s->types[i]) {
            case UDBZ:
                warn_report_once("dmg-bzip2 module is missing, accessing bzip2 "
                                 "compressed blocks will result in I/O errors");
                break;
            case ULFO:
                warn_report_once("dmg-lzfse module is missing, accessing lzfse "
                                 "compressed blocks will result in I/O errors");
                break;
            case UDCM:
            case UDLE:
                /* Comments and last entry can be ignored without problems */
                break;
            default:
                warn_report_once("Image contains chunks of unknown type %x, "
                                 "accessing them will result in I/O errors",
                                 s->types[i]);
                break;
            }
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type UDZE and UDIG) does not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}
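
/*
 * As parsed below, the resource fork starts with a 32-bit offset to the
 * resource data and, at byte 8, a 32-bit length of that data.  The resource
 * data itself is a sequence of resources, each prefixed with a 32-bit size
 * and containing one mish block; a trailing resource map, if present, is
 * ignored.  (Summary of dmg_read_resource_fork only, not of the full
 * resource fork format.)
 */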

static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, count, buffer, 0);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0);
    if (ret < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
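
/*
 * Offsets within the 512-byte "koly" UDIF trailer that dmg_open reads below
 * (all fields big-endian): 0x18 DataForkOffset, 0x28 RsrcForkOffset,
 * 0x30 resource fork length, 0xd8 XMLOffset, 0xe0 XML (plist) length,
 * 0x1ec total sector count of the image.  Only the fields used by this
 * driver are listed here.
 */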

static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    /*
     * NB: if uncompress submodules are absent,
     * ie block_module_load return value == 0, the function pointers
     * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL.
     */
    if (block_module_load("dmg-bz2", errp) < 0) {
        return -EINVAL;
    }
    if (block_module_load("dmg-lzfse", errp) < 0) {
        return -EINVAL;
    }

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

static inline int is_sector_in_chunk(BDRVDMGState *s,
                uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 <= chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            if (chunk3 == 0) {
                goto err;
            }
            chunk2 = chunk3 - 1;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3 + 1;
        }
    }
err:
    return s->n_chunks; /* error */
}
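
/*
 * dmg_read_chunk keeps a one-entry cache: the chunk containing sector_num is
 * located via search_chunk and, unless it is an all-zeroes chunk, its
 * decompressed contents are left in s->uncompressed_chunk with its index in
 * s->current_chunk.  s->current_chunk is set to s->n_chunks (an invalid
 * index) while reloading so a failed read is not mistaken for cached data.
 */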

static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                             s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                             s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO:
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                             s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                           (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                             s->uncompressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
    .is_format      = true,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);