block/dmg.c
/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "dmg.h"
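
/* Optional decompression hooks: these pointers are populated by the "dmg-bz2"
 * and "dmg-lzfse" block modules when they are loaded in dmg_open(), and stay
 * NULL when the corresponding module is not available. */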
int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                          char *next_out, unsigned int avail_out);

int (*dmg_uncompress_lzfse)(char *next_in, unsigned int avail_in,
                            char *next_out, unsigned int avail_out);

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

enum {
    /* DMG Block Type */
    UDZE = 0, /* Zeroes */
    UDRW,     /* RAW type */
    UDIG,     /* Ignore */
    UDCO = 0x80000004,
    UDZO,
    UDBZ,
    ULFO,
    UDCM = 0x7ffffffe, /* Comments */
    UDLE               /* Last Entry */
};
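
/* Score a candidate image for format probing. DMG images carry their "koly"
 * trailer magic at the end of the file (see dmg_find_koly_offset), so the
 * probe can only go by the ".dmg" file name extension. */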
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}
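
/* Helpers to read big-endian integers from the underlying file; all DMG
 * metadata is stored in big-endian byte order. */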
static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDIG: /* zero */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}
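
/* Locate the UDIF ("koly") trailer near the end of the image and return its
 * byte offset, or a negative errno value on failure. */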
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;
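
/* A block type is only "known" if this build can actually decode it: bzip2
 * and lzfse chunks additionally require the optional decompression modules
 * to be loaded. Unknown chunk types are skipped when parsing mish blocks. */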
static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDRW: /* uncompressed */
    case UDIG: /* zeroes */
    case UDZO: /* zlib */
        return true;
    case UDBZ: /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO: /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}
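
/* Parse one "mish" block (chunk table): after a 204-byte header it holds
 * 40-byte entries, each describing a chunk by type, output sector, sector
 * count, and offset/length within the data fork. Valid entries are appended
 * to the per-chunk arrays in BDRVDMGState. */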
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
         * therefore be unbounded. */
        if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}
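
/* Walk the resource fork: after an offset/length header it contains one or
 * more resources, each prefixed with a 32-bit size, and each resource is fed
 * to dmg_read_mish_block() to collect chunk table entries. */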
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
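
/* Images without a resource fork carry the chunk tables in an XML property
 * list instead: each <data>...</data> element holds a base64-encoded mish
 * block, which is decoded and parsed with dmg_read_mish_block(). */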
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
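
/* Open an image: locate the "koly" UDIF trailer, validate the data fork,
 * resource fork and XML plist offsets stored in it, build the chunk table
 * from whichever of the resource fork or plist is present, and allocate the
 * (de)compression buffers sized by the largest chunk that was found. */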
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    if (ret < 0) {
        return ret;
    }

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    block_module_load_one("dmg-bz2");
    block_module_load_one("dmg-lzfse");

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

static inline int is_sector_in_chunk(BDRVDMGState *s,
                                     uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}
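
/* Binary search for the chunk that covers sector_num; returns s->n_chunks if
 * no chunk contains it. */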
static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 != chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            chunk2 = chunk3;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3;
        }
    }
    return s->n_chunks; /* error */
}
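
/* Make sure the chunk containing sector_num is decoded into
 * s->uncompressed_chunk. The last decoded chunk is cached in s->current_chunk,
 * so consecutive reads from the same chunk avoid re-reading and re-inflating
 * it. Returns 0 on success, -1 on error. */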
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO:
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                           (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case UDIG: /* zero */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}
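
/* Read handler. Requests are sector-aligned (enforced by the request_alignment
 * set in dmg_refresh_limits) and are served one 512-byte sector at a time from
 * the cached uncompressed chunk; all-zeroes chunks are written to the
 * destination iovec with memset instead of going through the chunk buffer. */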
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_format_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);