qemu/ar7.git: block/dmg.c

/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/module.h"
#include <zlib.h>
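
/* A DMG image stores the guest data as a series of chunks.  Each chunk is
 * either zlib-compressed (type 0x80000005), stored uncompressed (type 1) or
 * known to be all zeroes (type 2).  dmg_open() parses the chunk tables once;
 * reads then locate the chunk covering the requested sector and, if needed,
 * inflate it into a scratch buffer. */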

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

typedef struct BDRVDMGState {
    CoMutex lock;
    /* each chunk contains a certain number of sectors,
     * offsets[i] is the offset in the .dmg file,
     * lengths[i] is the length of the compressed chunk,
     * sectors[i] is the sector beginning at offsets[i],
     * sectorcounts[i] is the number of sectors in that chunk,
     * the sectors array is ordered
     * 0<=i<n_chunks */

    uint32_t n_chunks;
    uint32_t *types;
    uint64_t *offsets;
    uint64_t *lengths;
    uint64_t *sectors;
    uint64_t *sectorcounts;
    uint32_t current_chunk;
    uint8_t *compressed_chunk;
    uint8_t *uncompressed_chunk;
    z_stream zstream;
} BDRVDMGState;

static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}
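
/* All metadata integers in a DMG file are stored big-endian; these helpers
 * read one field and convert it to host byte order in a single step. */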

static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

/* Increase max chunk sizes, if necessary.  This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case 0x80000005: /* zlib compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case 1: /* copy */
        uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
        break;
    case 2: /* zero */
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t info_begin, info_end, last_in_offset, last_out_offset;
    uint32_t count, tmp;
    uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
    int64_t offset;
    int ret;

    bs->read_only = 1;
    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
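
    /* The chunk tables are located through a trailer at the end of the image:
     * the 64-bit value 0x1d8 bytes before EOF is the file offset of that
     * metadata (presumably a field of the 512-byte "koly" block that ends
     * every UDIF/DMG file). */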
    /* read offset of info blocks */
    offset = bdrv_getlength(bs->file);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }
    offset -= 0x1d8;

    ret = read_uint64(bs, offset, &info_begin);
    if (ret < 0) {
        goto fail;
    } else if (info_begin == 0) {
        ret = -EINVAL;
        goto fail;
    }

    ret = read_uint32(bs, info_begin, &tmp);
    if (ret < 0) {
        goto fail;
    } else if (tmp != 0x100) {
        ret = -EINVAL;
        goto fail;
    }

    ret = read_uint32(bs, info_begin + 4, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0) {
        ret = -EINVAL;
        goto fail;
    }
    info_end = info_begin + count;

    offset = info_begin + 0x100;

    /* read offsets */
    last_in_offset = last_out_offset = 0;
    while (offset < info_end) {
        uint32_t type;

        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        ret = read_uint32(bs, offset, &type);
        if (ret < 0) {
            goto fail;
        }
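
        /* 0x6d697368 is "mish" in ASCII: it introduces a block table whose
         * entries describe a run of chunks. */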
        if (type == 0x6d697368 && count >= 244) {
            size_t new_size;
            uint32_t chunk_count;

            offset += 4;
            offset += 200;

            chunk_count = (count - 204) / 40;
            new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
            s->types = g_realloc(s->types, new_size / 2);
            s->offsets = g_realloc(s->offsets, new_size);
            s->lengths = g_realloc(s->lengths, new_size);
            s->sectors = g_realloc(s->sectors, new_size);
            s->sectorcounts = g_realloc(s->sectorcounts, new_size);
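
            /* Each remaining table entry is 40 bytes (hence the
             * (count - 204) / 40 above): a 32-bit chunk type, a skipped
             * 4-byte field, then the first sector, the sector count, the file
             * offset and the stored length, all big-endian. */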
            for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
                ret = read_uint32(bs, offset, &s->types[i]);
                if (ret < 0) {
                    goto fail;
                }
                offset += 4;
                if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
                    s->types[i] != 2) {
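                    /* Entries of any other type are not stored; an entry of
                     * type 0xffffffff terminates a table and records where
                     * the previous run ended, so that offsets in the next
                     * "mish" table can be rebased. */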
                    if (s->types[i] == 0xffffffff && i > 0) {
                        last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
                        last_out_offset = s->sectors[i - 1] +
                                          s->sectorcounts[i - 1];
                    }
                    chunk_count--;
                    i--;
                    offset += 36;
                    continue;
                }
                offset += 4;

                ret = read_uint64(bs, offset, &s->sectors[i]);
                if (ret < 0) {
                    goto fail;
                }
                s->sectors[i] += last_out_offset;
                offset += 8;

                ret = read_uint64(bs, offset, &s->sectorcounts[i]);
                if (ret < 0) {
                    goto fail;
                }
                offset += 8;

                if (s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
                    error_report("sector count %" PRIu64 " for chunk %" PRIu32
                                 " is larger than max (%u)",
                                 s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
                    ret = -EINVAL;
                    goto fail;
                }

                ret = read_uint64(bs, offset, &s->offsets[i]);
                if (ret < 0) {
                    goto fail;
                }
                s->offsets[i] += last_in_offset;
                offset += 8;

                ret = read_uint64(bs, offset, &s->lengths[i]);
                if (ret < 0) {
                    goto fail;
                }
                offset += 8;

                if (s->lengths[i] > DMG_LENGTHS_MAX) {
                    error_report("length %" PRIu64 " for chunk %" PRIu32
                                 " is larger than max (%u)",
                                 s->lengths[i], i, DMG_LENGTHS_MAX);
                    ret = -EINVAL;
                    goto fail;
                }

                update_max_chunk_size(s, i, &max_compressed_size,
                                      &max_sectors_per_chunk);
            }
            s->n_chunks += chunk_count;
        }
    }

    /* initialize zlib engine */
    s->compressed_chunk = g_malloc(max_compressed_size + 1);
    s->uncompressed_chunk = g_malloc(512 * max_sectors_per_chunk);
    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }
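
    /* current_chunk == n_chunks is deliberately out of range: it means that
     * no chunk is currently cached in uncompressed_chunk. */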
    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    g_free(s->compressed_chunk);
    g_free(s->uncompressed_chunk);
    return ret;
}

static inline int is_sector_in_chunk(BDRVDMGState *s,
                                     uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}
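
/* Map a guest sector to the index of the chunk that contains it; returns
 * s->n_chunks when no chunk covers the sector. */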
static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 != chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            chunk2 = chunk3;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3;
        }
    }
    return s->n_chunks; /* error */
}
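
/* Make sure that the chunk covering sector_num has been decompressed into
 * s->uncompressed_chunk.  The most recently used chunk is cached through
 * s->current_chunk, so consecutive sectors from the same chunk are read and
 * inflated only once. */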
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

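        /* Invalidate the cache while the scratch buffers are being rewritten,
         * so that a failed read below cannot leave a stale mapping behind. */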
        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) {
        case 0x80000005: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case 1: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case 2: /* zero */
            memset(s->uncompressed_chunk, 0, 512 * s->sectorcounts[chunk]);
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

static int dmg_read(BlockDriverState *bs, int64_t sector_num,
                    uint8_t *buf, int nb_sectors)
{
    BDRVDMGState *s = bs->opaque;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            return -1;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        memcpy(buf + i * 512,
               s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
    }
    return 0;
}
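
/* All reads go through the CoMutex: the zlib stream and the cached chunk in
 * BDRVDMGState are shared state, so only one coroutine may use them at a
 * time. */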
static coroutine_fn int dmg_co_read(BlockDriverState *bs, int64_t sector_num,
                                    uint8_t *buf, int nb_sectors)
{
    int ret;
    BDRVDMGState *s = bs->opaque;
    qemu_co_mutex_lock(&s->lock);
    ret = dmg_read(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    g_free(s->compressed_chunk);
    g_free(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}
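
/* Only read callbacks are wired up: dmg_open() marks the image read-only and
 * the driver provides no write support. */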
static BlockDriver bdrv_dmg = {
    .format_name = "dmg",
    .instance_size = sizeof(BDRVDMGState),
    .bdrv_probe = dmg_probe,
    .bdrv_open = dmg_open,
    .bdrv_read = dmg_co_read,
    .bdrv_close = dmg_close,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);
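
/* For reference: an image in this format can be converted to a raw file with,
 * for example, "qemu-img convert -f dmg image.dmg -O raw image.img" (the file
 * names are illustrative). */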