/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/memalign.h"
#include "dmg.h"
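
/* Set by the optional dmg-bz2 / dmg-lzfse block modules when they are loaded;
 * they stay NULL if the modules are absent (see the NB in dmg_open). */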
BdrvDmgUncompressFunc *dmg_uncompress_bz2;
BdrvDmgUncompressFunc *dmg_uncompress_lzfse;
/* Limit chunk sizes to prevent unreasonable amounts of memory being used
 * or truncating when converting to 32-bit types
 */
enum {
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};
enum {
    /* DMG Block Type */
    UDZE = 0,               /* Zeroes */
    UDRW,                   /* RAW type */
    UDIG,                   /* Ignore */
    UDCO = 0x80000004,
    UDZO,                   /* zlib compressed */
    UDBZ,                   /* bzip2 compressed */
    ULFO,                   /* lzfse compressed */
    UDCM = 0x7ffffffe,      /* Comments */
    UDLE = 0xffffffff       /* Last Entry */
};
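
/* Probing is based on the file name extension only; buf is not inspected,
 * since a DMG image keeps its "koly" magic at the end of the file. */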
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}
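
/* Helpers for reading big-endian integers, either from the image file
 * (read_uint64/read_uint32) or from an in-memory buffer (buff_read_*). */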
static int GRAPH_RDLOCK
read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 8, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int GRAPH_RDLOCK
read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 4, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}
static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}
/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDZE: /* zero */
    case UDIG: /* ignore */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}
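
/* The UDIF ("koly") trailer is a 512-byte structure at the very end of the
 * image; dmg_open() reads the data fork, resource fork and XML plist offsets
 * from it. */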
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, length, buffer, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}
/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * in the data fork */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;
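
/* A chunk type is "known" if dmg_read_chunk() can handle it; bzip2 and lzfse
 * chunks additionally require their optional decompression modules. */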
static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDZE:    /* zeros */
    case UDRW:    /* uncompressed */
    case UDIG:    /* ignore */
    case UDZO:    /* zlib */
        return true;
    case UDBZ:    /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO:    /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}
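
/*
 * Parse one "mish" block: a fixed header followed by 40-byte chunk entries
 * (type, start sector, sector count, offset and length in the data fork).
 * Every known entry is appended to the chunk table in *s.
 */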
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            switch (s->types[i]) {
            case UDBZ:
                warn_report_once("dmg-bzip2 module is missing, accessing bzip2 "
                                 "compressed blocks will result in I/O errors");
                break;
            case ULFO:
                warn_report_once("dmg-lzfse module is missing, accessing lzfse "
                                 "compressed blocks will result in I/O errors");
                break;
            case UDCM:
            case UDLE:
                /* Comments and last entry can be ignored without problems */
                break;
            default:
                warn_report_once("Image contains chunks of unknown type %x, "
                                 "accessing them will result in I/O errors",
                                 s->types[i]);
                break;
            }
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type UDZE and UDIG) does not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}
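
/*
 * Walk the resource fork: after an offset/length header follows a sequence of
 * resources, each prefixed by its 32-bit size. Every resource is handed to
 * dmg_read_mish_block().
 */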
static int GRAPH_RDLOCK
dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                       uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, count, buffer, 0);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
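
/*
 * Fallback for images without a resource fork: scan the XML property list for
 * base64-encoded <data> elements and feed each decoded blob to
 * dmg_read_mish_block().
 */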
static int GRAPH_RDLOCK
dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                   uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0);
    if (ret < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    bdrv_graph_rdlock_main_loop();
    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    bdrv_graph_rdunlock_main_loop();
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /*
     * NB: if uncompress submodules are absent,
     * ie block_module_load return value == 0, the function pointers
     * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL.
     */
    if (block_module_load("dmg-bz2", errp) < 0) {
        return -EINVAL;
    }
    if (block_module_load("dmg-lzfse", errp) < 0) {
        return -EINVAL;
    }

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}
static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
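
/* Chunk lookup helpers: is_sector_in_chunk() tests a single chunk,
 * search_chunk() binary-searches the table built by dmg_read_mish_block(). */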
static inline int is_sector_in_chunk(BDRVDMGState *s,
                uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}
static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 <= chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            if (chunk3 == 0) {
                goto err;
            }
            chunk2 = chunk3 - 1;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3 + 1;
        }
    }
err:
    return s->n_chunks; /* error */
}
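
/*
 * Make sure the chunk containing sector_num is cached: locate it, read its
 * (compressed) data and decompress it into s->uncompressed_chunk. Zero/ignore
 * chunks are not materialized here; dmg_co_preadv() zero-fills them directly.
 */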
static int coroutine_fn GRAPH_RDLOCK
dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break;
        }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO: /* lzfse compressed */
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                           (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->uncompressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}
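
/* Read handler: serialized with s->lock because all requests share the single
 * cached uncompressed chunk; data is copied to the iovec sector by sector. */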
static int coroutine_fn GRAPH_RDLOCK
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        const uint8_t *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
};
static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);