qemu/ar7.git: block/dmg.c

/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include <zlib.h>
#ifdef CONFIG_BZIP2
#include <bzlib.h>
#endif
#include <glib.h>

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

typedef struct BDRVDMGState {
    CoMutex lock;
    /* each chunk contains a certain number of sectors,
     * offsets[i] is the offset in the .dmg file,
     * lengths[i] is the length of the compressed chunk,
     * sectors[i] is the sector beginning at offsets[i],
     * sectorcounts[i] is the number of sectors in that chunk,
     * the sectors array is ordered
     * 0<=i<n_chunks */

    uint32_t n_chunks;
    uint32_t *types;
    uint64_t *offsets;
    uint64_t *lengths;
    uint64_t *sectors;
    uint64_t *sectorcounts;
    uint32_t current_chunk;
    uint8_t *compressed_chunk;
    uint8_t *uncompressed_chunk;
    z_stream zstream;
#ifdef CONFIG_BZIP2
    bz_stream bzstream;
#endif
} BDRVDMGState;

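/* Illustration (hypothetical values, not taken from a real image) of how the
 * parallel arrays above describe the chunk table.  Assuming a two-chunk image:
 *
 *   i | types[i]   | sectors[i] | sectorcounts[i] | offsets[i] | lengths[i]
 *   --+------------+------------+-----------------+------------+-----------
 *   0 | 0x80000005 |          0 |              64 |     0x1000 |      0x2a0
 *   1 | 0x00000002 |         64 |            1024 |     0x12a0 |          0
 *
 * A guest read of sector 70 falls into chunk 1, an all-zeroes chunk, so
 * dmg_read() below can memset the output instead of decompressing anything.
 */
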
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file->bs, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file->bs, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case 0x80000005: /* zlib compressed */
    case 0x80000006: /* bzip2 compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case 1: /* copy */
        uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
        break;
    case 2: /* zero */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int64_t dmg_find_koly_offset(BlockDriverState *file_bs, Error **errp)
{
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file_bs, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

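/* A sketch of the UDIF ("koly") trailer fields consumed by dmg_open() below.
 * Offsets are relative to the start of the 512-byte trailer and are inferred
 * from the reads in dmg_open(); this is not a complete description of the
 * on-disk format:
 *
 *   +0x000  magic "koly"
 *   +0x018  DataForkOffset  (start of the compressed data fork, in bytes)
 *   +0x028  RsrcForkOffset  (start of the resource fork, 0 if absent)
 *   +0x030  RsrcForkLength
 *   +0x0d8  XMLOffset       (start of the plist XML block map)
 *   +0x0e0  XMLLength
 *   +0x1ec  SectorCount     (uncompressed image size in 512-byte sectors)
 *
 * All fields are big-endian, which is why read_uint64()/read_uint32() above
 * convert with be64_to_cpu()/be32_to_cpu().
 */
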
/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case 0x00000001:    /* uncompressed */
    case 0x00000002:    /* zeroes */
    case 0x80000005:    /* zlib */
#ifdef CONFIG_BZIP2
    case 0x80000006:    /* bzip2 */
#endif
        return true;
    default:
        return false;
    }
}

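/* Layout of a "mish" (BLKX) block as parsed by dmg_read_mish_block() below.
 * Offsets are inferred from the buff_read_*() calls in that function and only
 * sketch the parts this driver actually uses:
 *
 *   +0x00   magic 0x6d697368 ("mish")
 *   +0x08   first output sector of this block (added to each entry's sector)
 *   +0x18   offset of this block's data within the data fork (in bytes)
 *   +0xcc   start of the chunk entry table (204 bytes into the block)
 *
 * Each 40-byte chunk entry contains:
 *
 *   +0x00   entry type (see dmg_is_known_block_type() above)
 *   +0x08   first sector covered by this chunk (relative to the block)
 *   +0x10   number of sectors in this chunk
 *   +0x18   offset of the (compressed) chunk data within the data fork
 *   +0x20   length of the (compressed) chunk data
 */
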
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
         * therefore be unbounded. */
        if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file->bs, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file->bs, info_begin, buffer, info_length);
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

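/* A dmg image stores its block map either in a classic resource fork
 * (dmg_read_resource_fork) or, typically in more recent images, in an XML
 * property list whose <data> elements are base64-encoded mish blocks
 * (dmg_read_plist_xml).  dmg_open() below prefers the resource fork when the
 * trailer advertises one and falls back to the plist otherwise.
 */
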
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    bs->read_only = 1;
    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file->bs, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

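/* The driver is read-only (bs->read_only is set at the top of dmg_open), so a
 * dmg image is typically either attached as a read-only drive or converted to
 * a writable format first, for example (file names are illustrative):
 *
 *   qemu-img convert -f dmg macos_installer.dmg -O raw macos_installer.img
 *
 * Passing -f dmg explicitly avoids relying on the suffix-based probe in
 * dmg_probe() above.
 */
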
static inline int is_sector_in_chunk(BDRVDMGState *s,
                uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 != chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            chunk2 = chunk3;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3;
        }
    }
    return s->n_chunks; /* error */
}

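/* search_chunk() is a binary search over the chunk table, which is sorted by
 * starting sector (see the "the sectors array is ordered" note in
 * BDRVDMGState).  [chunk1, chunk2) brackets the candidates and chunk3 is the
 * midpoint; a return value of s->n_chunks means no chunk covers sector_num,
 * which dmg_read_chunk() below treats as an error.
 */
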
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);
#ifdef CONFIG_BZIP2
        uint64_t total_out;
#endif

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case 0x80000005: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file->bs, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
#ifdef CONFIG_BZIP2
        case 0x80000006: /* bzip2 compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file->bs, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = BZ2_bzDecompressInit(&s->bzstream, 0, 0);
            if (ret != BZ_OK) {
                return -1;
            }
            s->bzstream.next_in = (char *)s->compressed_chunk;
            s->bzstream.avail_in = (unsigned int) s->lengths[chunk];
            s->bzstream.next_out = (char *)s->uncompressed_chunk;
            s->bzstream.avail_out = (unsigned int) 512 * s->sectorcounts[chunk];
            ret = BZ2_bzDecompress(&s->bzstream);
            total_out = ((uint64_t)s->bzstream.total_out_hi32 << 32) +
                        s->bzstream.total_out_lo32;
            BZ2_bzDecompressEnd(&s->bzstream);
            if (ret != BZ_STREAM_END ||
                total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break;
#endif /* CONFIG_BZIP2 */
        case 1: /* copy */
            ret = bdrv_pread(bs->file->bs, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case 2: /* zero */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

static int dmg_read(BlockDriverState *bs, int64_t sector_num,
                    uint8_t *buf, int nb_sectors)
{
    BDRVDMGState *s = bs->opaque;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            return -1;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            memset(buf + i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        memcpy(buf + i * 512,
               s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
    }
    return 0;
}

static coroutine_fn int dmg_co_read(BlockDriverState *bs, int64_t sector_num,
                                    uint8_t *buf, int nb_sectors)
{
    int ret;
    BDRVDMGState *s = bs->opaque;
    qemu_co_mutex_lock(&s->lock);
    ret = dmg_read(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_read      = dmg_co_read,
    .bdrv_close     = dmg_close,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);