/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include <zlib.h>
#ifdef CONFIG_BZIP2
#include <bzlib.h>
#endif

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

typedef struct BDRVDMGState {
    CoMutex lock;
    /* each chunk contains a certain number of sectors,
     * offsets[i] is the offset in the .dmg file,
     * lengths[i] is the length of the compressed chunk,
     * sectors[i] is the sector beginning at offsets[i],
     * sectorcounts[i] is the number of sectors in that chunk,
     * the sectors array is ordered
     * 0<=i<n_chunks */

    uint32_t n_chunks;
    uint32_t *types;
    uint64_t *offsets;
    uint64_t *lengths;
    uint64_t *sectors;
    uint64_t *sectorcounts;
    uint32_t current_chunk;
    uint8_t *compressed_chunk;
    uint8_t *uncompressed_chunk;
    z_stream zstream;
#ifdef CONFIG_BZIP2
    bz_stream bzstream;
#endif
} BDRVDMGState;

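/* Format probe: the UDIF metadata lives in a trailer at the end of the image
 * (see dmg_find_koly_offset() below), so the probe can only go by the file
 * name. The low score of 2 lets drivers that recognise actual on-disk
 * content win. */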
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

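/* Integer fields in the UDIF trailer and resource data are stored big-endian;
 * these helpers read a single field from the image file and convert it to
 * host byte order. */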
static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case 0x80000005: /* zlib compressed */
    case 0x80000006: /* bzip2 compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case 1: /* copy */
        uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
        break;
    case 2: /* zero */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

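/* Returns the offset of the 512-byte "koly" UDIF trailer within the image,
 * or a negative errno value on failure. */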
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case 0x00000001:    /* uncompressed */
    case 0x00000002:    /* zeroes */
    case 0x80000005:    /* zlib */
#ifdef CONFIG_BZIP2
    case 0x80000006:    /* bzip2 */
#endif
        return true;
    default:
        return false;
    }
}

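/* Parse one "mish" block (magic 0x6d697368) and append its chunk entries to
 * the sector table. Each 40-byte entry holds, in big-endian order: the entry
 * type at offset 0, the first sector at offset 8, the sector count at offset
 * 0x10, the offset into the data fork at offset 0x18, and the (compressed)
 * length at offset 0x20. */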
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
         * therefore be unbounded. */
        if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

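/* Walk the resource fork: a header gives the offset and length of the
 * resource data, then each resource is a 4-byte big-endian size followed by
 * its payload, which is handed to dmg_read_mish_block(). A trailing resource
 * map, if present, is ignored. */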
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

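/* Fallback for images without a resource fork: scan the XML property list
 * for <data>...</data> elements and parse their base64-decoded contents as
 * mish blocks, just like the resource fork payloads. */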
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

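/* Open sequence: locate the "koly" UDIF trailer, read the data fork, resource
 * fork and XML plist locations from it, build the chunk/sector table from the
 * resource fork (or, failing that, from the plist), then allocate the
 * per-chunk I/O buffers and initialise the zlib stream. */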
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    bs->read_only = true;

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

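/* Returns -1 ("true") when sector_num lies inside chunk chunk_num of the
 * sector table, 0 otherwise. */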
static inline int is_sector_in_chunk(BDRVDMGState *s,
                                     uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

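/* Binary search over the ordered sectors[] array; returns the index of the
 * chunk containing sector_num, or s->n_chunks if no chunk covers it. */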
static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 != chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            chunk2 = chunk3;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3;
        }
    }
    return s->n_chunks; /* error */
}

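/* Make sure the chunk containing sector_num is resident: if it is not the
 * current chunk, look it up and decompress (or copy) it into
 * s->uncompressed_chunk. Zero chunks are not materialised here; dmg_co_preadv
 * memsets them directly. Returns 0 on success, -1 on error. */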
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);
#ifdef CONFIG_BZIP2
        uint64_t total_out;
#endif

        if (chunk >= s->n_chunks) {
            return -1;
        }

        /* invalidate the cache so a failed decompression is not reused */
        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case 0x80000005: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
#ifdef CONFIG_BZIP2
        case 0x80000006: /* bzip2 compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = BZ2_bzDecompressInit(&s->bzstream, 0, 0);
            if (ret != BZ_OK) {
                return -1;
            }
            s->bzstream.next_in = (char *)s->compressed_chunk;
            s->bzstream.avail_in = (unsigned int) s->lengths[chunk];
            s->bzstream.next_out = (char *)s->uncompressed_chunk;
            s->bzstream.avail_out = (unsigned int) 512 * s->sectorcounts[chunk];
            ret = BZ2_bzDecompress(&s->bzstream);
            total_out = ((uint64_t)s->bzstream.total_out_hi32 << 32) +
                        s->bzstream.total_out_lo32;
            BZ2_bzDecompressEnd(&s->bzstream);
            if (ret != BZ_STREAM_END ||
                total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break;
#endif /* CONFIG_BZIP2 */
        case 1: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case 2: /* zero */
            /* see dmg_co_preadv, it is treated specially. No buffer needs to
             * be pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

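/* Read request handler: requests are sector-aligned (see dmg_refresh_limits)
 * and served one 512-byte sector at a time from the cached uncompressed
 * chunk; all-zeroes chunks are written into the iovec with qemu_iovec_memset
 * instead of being copied from a buffer. */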
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);