/*
 * Block driver for the VMDK format
 *
 * Copyright (c) 2004 Fabrice Bellard
 * Copyright (c) 2005 Filip Navara
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "qemu-common.h"
27 #include "block_int.h"
/* On-disk magic numbers: VMDK3 images start with "COWD", VMDK4 with "KDMV". */
#define VMDK3_MAGIC (('C' << 24) | ('O' << 16) | ('W' << 8) | 'D')
#define VMDK4_MAGIC (('K' << 24) | ('D' << 16) | ('M' << 8) | 'V')
/* On-disk header of a VMDK3 ("COWD") image, all fields little-endian.
 * NOTE(review): reconstructed from a garbled extract; field list matches the
 * uses in vmdk_open() (granularity, disk_sectors, l1dir_offset) — confirm
 * against upstream block/vmdk.c. */
typedef struct {
    uint32_t version;
    uint32_t flags;
    uint32_t disk_sectors;
    uint32_t granularity;
    uint32_t l1dir_offset;
    uint32_t l1dir_size;
    uint32_t file_sectors;
    uint32_t cylinders;
    uint32_t heads;
    uint32_t sectors_per_track;
} VMDK3Header;

/* On-disk header of a VMDK4 ("KDMV") sparse extent, all fields little-endian.
 * Packed: the struct mirrors the raw bytes read from sector 0 (after the
 * 4-byte magic). */
typedef struct {
    uint32_t version;
    uint32_t flags;
    int64_t capacity;          /* size of the extent, in sectors */
    int64_t granularity;       /* grain size, in sectors */
    int64_t desc_offset;       /* embedded descriptor, in sectors */
    int64_t desc_size;
    int32_t num_gtes_per_gte;  /* grain-table entries per grain table */
    int64_t rgd_offset;        /* redundant grain directory, in sectors */
    int64_t gd_offset;         /* grain directory, in sectors */
    int64_t grain_offset;      /* first data grain, in sectors */
    char filler[1];
    char check_bytes[4];       /* line-ending probe bytes: \n \x20 \r \n */
} __attribute__((packed)) VMDK4Header;
/* Number of L2 (grain) tables kept in the in-memory LRU-ish cache. */
#define L2_CACHE_SIZE 16

/* Per-image driver state, hung off BlockDriverState.opaque. */
typedef struct BDRVVmdkState {
    int64_t l1_table_offset;        /* byte offset of the L1 (grain dir) */
    int64_t l1_backup_table_offset; /* byte offset of the backup grain dir */
    uint32_t *l1_table;             /* L1 entries, host endianness */
    uint32_t *l1_backup_table;      /* backup L1, NULL for VMDK3 */
    unsigned int l1_size;           /* number of L1 entries */
    uint32_t l1_entry_sectors;      /* sectors covered by one L1 entry */

    unsigned int l2_size;           /* entries per L2 table */
    uint32_t *l2_cache;             /* L2_CACHE_SIZE tables, back to back */
    uint32_t l2_cache_offsets[L2_CACHE_SIZE]; /* sector offset of cached L2 */
    uint32_t l2_cache_counts[L2_CACHE_SIZE];  /* hit counters for eviction */

    unsigned int cluster_sectors;   /* grain size in sectors */
    uint32_t parent_cid;            /* parentCID read at open time */
} BDRVVmdkState;
/* Location of a freshly allocated grain's L2 entry, filled in by
 * get_cluster_offset() and consumed by vmdk_L2update().
 * NOTE(review): `offset` (little-endian entry value) and `valid` are
 * reconstructed from their uses in get_cluster_offset()/vmdk_write() —
 * confirm against upstream block/vmdk.c. */
typedef struct VmdkMetaData {
    uint32_t offset;         /* new L2 entry value, already little-endian */
    unsigned int l1_index;   /* index into the L1 table */
    unsigned int l2_index;   /* index into the L2 table */
    unsigned int l2_offset;  /* sector offset of the L2 table */
    int valid;               /* nonzero when an L2 update is pending */
} VmdkMetaData;
89 static int vmdk_probe(const uint8_t *buf
, int buf_size
, const char *filename
)
95 magic
= be32_to_cpu(*(uint32_t *)buf
);
96 if (magic
== VMDK3_MAGIC
||
#define SECTOR_SIZE 512
/* Parenthesized: the original `20*SECTOR_SIZE` expansion was unhygienic
 * (would mis-expand next to higher-precedence operators). */
#define DESC_SIZE (20 * SECTOR_SIZE)    /* 20 sectors of 512 bytes each */
#define HEADER_SIZE 512                 /* first sector of 512 bytes */
109 static uint32_t vmdk_read_cid(BlockDriverState
*bs
, int parent
)
111 char desc
[DESC_SIZE
];
113 const char *p_name
, *cid_str
;
116 /* the descriptor offset = 0x200 */
117 if (bdrv_pread(bs
->file
, 0x200, desc
, DESC_SIZE
) != DESC_SIZE
)
121 cid_str
= "parentCID";
122 cid_str_size
= sizeof("parentCID");
125 cid_str_size
= sizeof("CID");
128 if ((p_name
= strstr(desc
,cid_str
)) != NULL
) {
129 p_name
+= cid_str_size
;
130 sscanf(p_name
,"%x",&cid
);
136 static int vmdk_write_cid(BlockDriverState
*bs
, uint32_t cid
)
138 char desc
[DESC_SIZE
], tmp_desc
[DESC_SIZE
];
139 char *p_name
, *tmp_str
;
141 /* the descriptor offset = 0x200 */
142 if (bdrv_pread(bs
->file
, 0x200, desc
, DESC_SIZE
) != DESC_SIZE
)
145 tmp_str
= strstr(desc
,"parentCID");
146 pstrcpy(tmp_desc
, sizeof(tmp_desc
), tmp_str
);
147 if ((p_name
= strstr(desc
,"CID")) != NULL
) {
148 p_name
+= sizeof("CID");
149 snprintf(p_name
, sizeof(desc
) - (p_name
- desc
), "%x\n", cid
);
150 pstrcat(desc
, sizeof(desc
), tmp_desc
);
153 if (bdrv_pwrite_sync(bs
->file
, 0x200, desc
, DESC_SIZE
) < 0)
158 static int vmdk_is_cid_valid(BlockDriverState
*bs
)
161 BDRVVmdkState
*s
= bs
->opaque
;
162 BlockDriverState
*p_bs
= bs
->backing_hd
;
166 cur_pcid
= vmdk_read_cid(p_bs
,0);
167 if (s
->parent_cid
!= cur_pcid
)
176 static int vmdk_snapshot_create(const char *filename
, const char *backing_file
)
181 char *p_name
, *gd_buf
, *rgd_buf
;
182 const char *real_filename
, *temp_str
;
184 uint32_t gde_entries
, gd_size
;
185 int64_t gd_offset
, rgd_offset
, capacity
, gt_size
;
186 char p_desc
[DESC_SIZE
], s_desc
[DESC_SIZE
], hdr
[HEADER_SIZE
];
187 static const char desc_template
[] =
188 "# Disk DescriptorFile\n"
192 "createType=\"monolithicSparse\"\n"
193 "parentFileNameHint=\"%s\"\n"
195 "# Extent description\n"
196 "RW %u SPARSE \"%s\"\n"
198 "# The Disk Data Base \n"
202 snp_fd
= open(filename
, O_RDWR
| O_CREAT
| O_TRUNC
| O_BINARY
| O_LARGEFILE
, 0644);
205 p_fd
= open(backing_file
, O_RDONLY
| O_BINARY
| O_LARGEFILE
);
211 /* read the header */
212 if (lseek(p_fd
, 0x0, SEEK_SET
) == -1) {
216 if (read(p_fd
, hdr
, HEADER_SIZE
) != HEADER_SIZE
) {
221 /* write the header */
222 if (lseek(snp_fd
, 0x0, SEEK_SET
) == -1) {
226 if (write(snp_fd
, hdr
, HEADER_SIZE
) == -1) {
231 memset(&header
, 0, sizeof(header
));
232 memcpy(&header
,&hdr
[4], sizeof(header
)); // skip the VMDK4_MAGIC
234 if (ftruncate(snp_fd
, header
.grain_offset
<< 9)) {
238 /* the descriptor offset = 0x200 */
239 if (lseek(p_fd
, 0x200, SEEK_SET
) == -1) {
243 if (read(p_fd
, p_desc
, DESC_SIZE
) != DESC_SIZE
) {
248 if ((p_name
= strstr(p_desc
,"CID")) != NULL
) {
249 p_name
+= sizeof("CID");
250 sscanf(p_name
,"%x",&p_cid
);
253 real_filename
= filename
;
254 if ((temp_str
= strrchr(real_filename
, '\\')) != NULL
)
255 real_filename
= temp_str
+ 1;
256 if ((temp_str
= strrchr(real_filename
, '/')) != NULL
)
257 real_filename
= temp_str
+ 1;
258 if ((temp_str
= strrchr(real_filename
, ':')) != NULL
)
259 real_filename
= temp_str
+ 1;
261 snprintf(s_desc
, sizeof(s_desc
), desc_template
, p_cid
, p_cid
, backing_file
,
262 (uint32_t)header
.capacity
, real_filename
);
264 /* write the descriptor */
265 if (lseek(snp_fd
, 0x200, SEEK_SET
) == -1) {
269 if (write(snp_fd
, s_desc
, strlen(s_desc
)) == -1) {
274 gd_offset
= header
.gd_offset
* SECTOR_SIZE
; // offset of GD table
275 rgd_offset
= header
.rgd_offset
* SECTOR_SIZE
; // offset of RGD table
276 capacity
= header
.capacity
* SECTOR_SIZE
; // Extent size
278 * Each GDE span 32M disk, means:
279 * 512 GTE per GT, each GTE points to grain
281 gt_size
= (int64_t)header
.num_gtes_per_gte
* header
.granularity
* SECTOR_SIZE
;
286 gde_entries
= (uint32_t)(capacity
/ gt_size
); // number of gde/rgde
287 gd_size
= gde_entries
* sizeof(uint32_t);
290 rgd_buf
= qemu_malloc(gd_size
);
291 if (lseek(p_fd
, rgd_offset
, SEEK_SET
) == -1) {
295 if (read(p_fd
, rgd_buf
, gd_size
) != gd_size
) {
299 if (lseek(snp_fd
, rgd_offset
, SEEK_SET
) == -1) {
303 if (write(snp_fd
, rgd_buf
, gd_size
) == -1) {
309 gd_buf
= qemu_malloc(gd_size
);
310 if (lseek(p_fd
, gd_offset
, SEEK_SET
) == -1) {
314 if (read(p_fd
, gd_buf
, gd_size
) != gd_size
) {
318 if (lseek(snp_fd
, gd_offset
, SEEK_SET
) == -1) {
322 if (write(snp_fd
, gd_buf
, gd_size
) == -1) {
338 static int vmdk_parent_open(BlockDriverState
*bs
)
341 char desc
[DESC_SIZE
];
343 /* the descriptor offset = 0x200 */
344 if (bdrv_pread(bs
->file
, 0x200, desc
, DESC_SIZE
) != DESC_SIZE
)
347 if ((p_name
= strstr(desc
,"parentFileNameHint")) != NULL
) {
350 p_name
+= sizeof("parentFileNameHint") + 1;
351 if ((end_name
= strchr(p_name
,'\"')) == NULL
)
353 if ((end_name
- p_name
) > sizeof (bs
->backing_file
) - 1)
356 pstrcpy(bs
->backing_file
, end_name
- p_name
+ 1, p_name
);
362 static int vmdk_open(BlockDriverState
*bs
, int flags
)
364 BDRVVmdkState
*s
= bs
->opaque
;
368 if (bdrv_pread(bs
->file
, 0, &magic
, sizeof(magic
)) != sizeof(magic
))
371 magic
= be32_to_cpu(magic
);
372 if (magic
== VMDK3_MAGIC
) {
375 if (bdrv_pread(bs
->file
, sizeof(magic
), &header
, sizeof(header
)) != sizeof(header
))
377 s
->cluster_sectors
= le32_to_cpu(header
.granularity
);
380 bs
->total_sectors
= le32_to_cpu(header
.disk_sectors
);
381 s
->l1_table_offset
= le32_to_cpu(header
.l1dir_offset
) << 9;
382 s
->l1_backup_table_offset
= 0;
383 s
->l1_entry_sectors
= s
->l2_size
* s
->cluster_sectors
;
384 } else if (magic
== VMDK4_MAGIC
) {
387 if (bdrv_pread(bs
->file
, sizeof(magic
), &header
, sizeof(header
)) != sizeof(header
))
389 bs
->total_sectors
= le64_to_cpu(header
.capacity
);
390 s
->cluster_sectors
= le64_to_cpu(header
.granularity
);
391 s
->l2_size
= le32_to_cpu(header
.num_gtes_per_gte
);
392 s
->l1_entry_sectors
= s
->l2_size
* s
->cluster_sectors
;
393 if (s
->l1_entry_sectors
<= 0)
395 s
->l1_size
= (bs
->total_sectors
+ s
->l1_entry_sectors
- 1)
396 / s
->l1_entry_sectors
;
397 s
->l1_table_offset
= le64_to_cpu(header
.rgd_offset
) << 9;
398 s
->l1_backup_table_offset
= le64_to_cpu(header
.gd_offset
) << 9;
400 // try to open parent images, if exist
401 if (vmdk_parent_open(bs
) != 0)
403 // write the CID once after the image creation
404 s
->parent_cid
= vmdk_read_cid(bs
,1);
409 /* read the L1 table */
410 l1_size
= s
->l1_size
* sizeof(uint32_t);
411 s
->l1_table
= qemu_malloc(l1_size
);
412 if (bdrv_pread(bs
->file
, s
->l1_table_offset
, s
->l1_table
, l1_size
) != l1_size
)
414 for(i
= 0; i
< s
->l1_size
; i
++) {
415 le32_to_cpus(&s
->l1_table
[i
]);
418 if (s
->l1_backup_table_offset
) {
419 s
->l1_backup_table
= qemu_malloc(l1_size
);
420 if (bdrv_pread(bs
->file
, s
->l1_backup_table_offset
, s
->l1_backup_table
, l1_size
) != l1_size
)
422 for(i
= 0; i
< s
->l1_size
; i
++) {
423 le32_to_cpus(&s
->l1_backup_table
[i
]);
427 s
->l2_cache
= qemu_malloc(s
->l2_size
* L2_CACHE_SIZE
* sizeof(uint32_t));
430 qemu_free(s
->l1_backup_table
);
431 qemu_free(s
->l1_table
);
432 qemu_free(s
->l2_cache
);
436 static uint64_t get_cluster_offset(BlockDriverState
*bs
, VmdkMetaData
*m_data
,
437 uint64_t offset
, int allocate
);
439 static int get_whole_cluster(BlockDriverState
*bs
, uint64_t cluster_offset
,
440 uint64_t offset
, int allocate
)
442 BDRVVmdkState
*s
= bs
->opaque
;
443 uint8_t whole_grain
[s
->cluster_sectors
*512]; // 128 sectors * 512 bytes each = grain size 64KB
445 // we will be here if it's first write on non-exist grain(cluster).
446 // try to read from parent image, if exist
447 if (bs
->backing_hd
) {
450 if (!vmdk_is_cid_valid(bs
))
453 ret
= bdrv_read(bs
->backing_hd
, offset
>> 9, whole_grain
,
459 //Write grain only into the active image
460 ret
= bdrv_write(bs
->file
, cluster_offset
, whole_grain
,
469 static int vmdk_L2update(BlockDriverState
*bs
, VmdkMetaData
*m_data
)
471 BDRVVmdkState
*s
= bs
->opaque
;
473 /* update L2 table */
474 if (bdrv_pwrite_sync(bs
->file
, ((int64_t)m_data
->l2_offset
* 512) + (m_data
->l2_index
* sizeof(m_data
->offset
)),
475 &(m_data
->offset
), sizeof(m_data
->offset
)) < 0)
477 /* update backup L2 table */
478 if (s
->l1_backup_table_offset
!= 0) {
479 m_data
->l2_offset
= s
->l1_backup_table
[m_data
->l1_index
];
480 if (bdrv_pwrite_sync(bs
->file
, ((int64_t)m_data
->l2_offset
* 512) + (m_data
->l2_index
* sizeof(m_data
->offset
)),
481 &(m_data
->offset
), sizeof(m_data
->offset
)) < 0)
488 static uint64_t get_cluster_offset(BlockDriverState
*bs
, VmdkMetaData
*m_data
,
489 uint64_t offset
, int allocate
)
491 BDRVVmdkState
*s
= bs
->opaque
;
492 unsigned int l1_index
, l2_offset
, l2_index
;
494 uint32_t min_count
, *l2_table
, tmp
= 0;
495 uint64_t cluster_offset
;
500 l1_index
= (offset
>> 9) / s
->l1_entry_sectors
;
501 if (l1_index
>= s
->l1_size
)
503 l2_offset
= s
->l1_table
[l1_index
];
506 for(i
= 0; i
< L2_CACHE_SIZE
; i
++) {
507 if (l2_offset
== s
->l2_cache_offsets
[i
]) {
508 /* increment the hit count */
509 if (++s
->l2_cache_counts
[i
] == 0xffffffff) {
510 for(j
= 0; j
< L2_CACHE_SIZE
; j
++) {
511 s
->l2_cache_counts
[j
] >>= 1;
514 l2_table
= s
->l2_cache
+ (i
* s
->l2_size
);
518 /* not found: load a new entry in the least used one */
520 min_count
= 0xffffffff;
521 for(i
= 0; i
< L2_CACHE_SIZE
; i
++) {
522 if (s
->l2_cache_counts
[i
] < min_count
) {
523 min_count
= s
->l2_cache_counts
[i
];
527 l2_table
= s
->l2_cache
+ (min_index
* s
->l2_size
);
528 if (bdrv_pread(bs
->file
, (int64_t)l2_offset
* 512, l2_table
, s
->l2_size
* sizeof(uint32_t)) !=
529 s
->l2_size
* sizeof(uint32_t))
532 s
->l2_cache_offsets
[min_index
] = l2_offset
;
533 s
->l2_cache_counts
[min_index
] = 1;
535 l2_index
= ((offset
>> 9) / s
->cluster_sectors
) % s
->l2_size
;
536 cluster_offset
= le32_to_cpu(l2_table
[l2_index
]);
538 if (!cluster_offset
) {
542 // Avoid the L2 tables update for the images that have snapshots.
543 cluster_offset
= bdrv_getlength(bs
->file
);
544 bdrv_truncate(bs
->file
, cluster_offset
+ (s
->cluster_sectors
<< 9));
546 cluster_offset
>>= 9;
547 tmp
= cpu_to_le32(cluster_offset
);
548 l2_table
[l2_index
] = tmp
;
550 /* First of all we write grain itself, to avoid race condition
551 * that may to corrupt the image.
552 * This problem may occur because of insufficient space on host disk
553 * or inappropriate VM shutdown.
555 if (get_whole_cluster(bs
, cluster_offset
, offset
, allocate
) == -1)
559 m_data
->offset
= tmp
;
560 m_data
->l1_index
= l1_index
;
561 m_data
->l2_index
= l2_index
;
562 m_data
->l2_offset
= l2_offset
;
566 cluster_offset
<<= 9;
567 return cluster_offset
;
570 static int vmdk_is_allocated(BlockDriverState
*bs
, int64_t sector_num
,
571 int nb_sectors
, int *pnum
)
573 BDRVVmdkState
*s
= bs
->opaque
;
574 int index_in_cluster
, n
;
575 uint64_t cluster_offset
;
577 cluster_offset
= get_cluster_offset(bs
, NULL
, sector_num
<< 9, 0);
578 index_in_cluster
= sector_num
% s
->cluster_sectors
;
579 n
= s
->cluster_sectors
- index_in_cluster
;
583 return (cluster_offset
!= 0);
586 static int vmdk_read(BlockDriverState
*bs
, int64_t sector_num
,
587 uint8_t *buf
, int nb_sectors
)
589 BDRVVmdkState
*s
= bs
->opaque
;
590 int index_in_cluster
, n
, ret
;
591 uint64_t cluster_offset
;
593 while (nb_sectors
> 0) {
594 cluster_offset
= get_cluster_offset(bs
, NULL
, sector_num
<< 9, 0);
595 index_in_cluster
= sector_num
% s
->cluster_sectors
;
596 n
= s
->cluster_sectors
- index_in_cluster
;
599 if (!cluster_offset
) {
600 // try to read from parent image, if exist
601 if (bs
->backing_hd
) {
602 if (!vmdk_is_cid_valid(bs
))
604 ret
= bdrv_read(bs
->backing_hd
, sector_num
, buf
, n
);
608 memset(buf
, 0, 512 * n
);
611 if(bdrv_pread(bs
->file
, cluster_offset
+ index_in_cluster
* 512, buf
, n
* 512) != n
* 512)
621 static int vmdk_write(BlockDriverState
*bs
, int64_t sector_num
,
622 const uint8_t *buf
, int nb_sectors
)
624 BDRVVmdkState
*s
= bs
->opaque
;
626 int index_in_cluster
, n
;
627 uint64_t cluster_offset
;
628 static int cid_update
= 0;
630 if (sector_num
> bs
->total_sectors
) {
632 "(VMDK) Wrong offset: sector_num=0x%" PRIx64
633 " total_sectors=0x%" PRIx64
"\n",
634 sector_num
, bs
->total_sectors
);
638 while (nb_sectors
> 0) {
639 index_in_cluster
= sector_num
& (s
->cluster_sectors
- 1);
640 n
= s
->cluster_sectors
- index_in_cluster
;
643 cluster_offset
= get_cluster_offset(bs
, &m_data
, sector_num
<< 9, 1);
647 if (bdrv_pwrite(bs
->file
, cluster_offset
+ index_in_cluster
* 512, buf
, n
* 512) != n
* 512)
650 /* update L2 tables */
651 if (vmdk_L2update(bs
, &m_data
) == -1)
658 // update CID on the first write every time the virtual disk is opened
660 vmdk_write_cid(bs
, time(NULL
));
667 static int vmdk_create(const char *filename
, QEMUOptionParameter
*options
)
671 uint32_t tmp
, magic
, grains
, gd_size
, gt_size
, gt_count
;
672 static const char desc_template
[] =
673 "# Disk DescriptorFile\n"
676 "parentCID=ffffffff\n"
677 "createType=\"monolithicSparse\"\n"
679 "# Extent description\n"
680 "RW %" PRId64
" SPARSE \"%s\"\n"
682 "# The Disk Data Base \n"
685 "ddb.virtualHWVersion = \"%d\"\n"
686 "ddb.geometry.cylinders = \"%" PRId64
"\"\n"
687 "ddb.geometry.heads = \"16\"\n"
688 "ddb.geometry.sectors = \"63\"\n"
689 "ddb.adapterType = \"ide\"\n";
691 const char *real_filename
, *temp_str
;
692 int64_t total_size
= 0;
693 const char *backing_file
= NULL
;
698 while (options
&& options
->name
) {
699 if (!strcmp(options
->name
, BLOCK_OPT_SIZE
)) {
700 total_size
= options
->value
.n
/ 512;
701 } else if (!strcmp(options
->name
, BLOCK_OPT_BACKING_FILE
)) {
702 backing_file
= options
->value
.s
;
703 } else if (!strcmp(options
->name
, BLOCK_OPT_COMPAT6
)) {
704 flags
|= options
->value
.n
? BLOCK_FLAG_COMPAT6
: 0;
709 /* XXX: add support for backing file */
711 return vmdk_snapshot_create(filename
, backing_file
);
714 fd
= open(filename
, O_WRONLY
| O_CREAT
| O_TRUNC
| O_BINARY
| O_LARGEFILE
,
718 magic
= cpu_to_be32(VMDK4_MAGIC
);
719 memset(&header
, 0, sizeof(header
));
720 header
.version
= cpu_to_le32(1);
721 header
.flags
= cpu_to_le32(3); /* ?? */
722 header
.capacity
= cpu_to_le64(total_size
);
723 header
.granularity
= cpu_to_le64(128);
724 header
.num_gtes_per_gte
= cpu_to_le32(512);
726 grains
= (total_size
+ header
.granularity
- 1) / header
.granularity
;
727 gt_size
= ((header
.num_gtes_per_gte
* sizeof(uint32_t)) + 511) >> 9;
728 gt_count
= (grains
+ header
.num_gtes_per_gte
- 1) / header
.num_gtes_per_gte
;
729 gd_size
= (gt_count
* sizeof(uint32_t) + 511) >> 9;
731 header
.desc_offset
= 1;
732 header
.desc_size
= 20;
733 header
.rgd_offset
= header
.desc_offset
+ header
.desc_size
;
734 header
.gd_offset
= header
.rgd_offset
+ gd_size
+ (gt_size
* gt_count
);
735 header
.grain_offset
=
736 ((header
.gd_offset
+ gd_size
+ (gt_size
* gt_count
) +
737 header
.granularity
- 1) / header
.granularity
) *
740 header
.desc_offset
= cpu_to_le64(header
.desc_offset
);
741 header
.desc_size
= cpu_to_le64(header
.desc_size
);
742 header
.rgd_offset
= cpu_to_le64(header
.rgd_offset
);
743 header
.gd_offset
= cpu_to_le64(header
.gd_offset
);
744 header
.grain_offset
= cpu_to_le64(header
.grain_offset
);
746 header
.check_bytes
[0] = 0xa;
747 header
.check_bytes
[1] = 0x20;
748 header
.check_bytes
[2] = 0xd;
749 header
.check_bytes
[3] = 0xa;
751 /* write all the data */
752 ret
= qemu_write_full(fd
, &magic
, sizeof(magic
));
753 if (ret
!= sizeof(magic
)) {
757 ret
= qemu_write_full(fd
, &header
, sizeof(header
));
758 if (ret
!= sizeof(header
)) {
763 ret
= ftruncate(fd
, header
.grain_offset
<< 9);
769 /* write grain directory */
770 lseek(fd
, le64_to_cpu(header
.rgd_offset
) << 9, SEEK_SET
);
771 for (i
= 0, tmp
= header
.rgd_offset
+ gd_size
;
772 i
< gt_count
; i
++, tmp
+= gt_size
) {
773 ret
= qemu_write_full(fd
, &tmp
, sizeof(tmp
));
774 if (ret
!= sizeof(tmp
)) {
780 /* write backup grain directory */
781 lseek(fd
, le64_to_cpu(header
.gd_offset
) << 9, SEEK_SET
);
782 for (i
= 0, tmp
= header
.gd_offset
+ gd_size
;
783 i
< gt_count
; i
++, tmp
+= gt_size
) {
784 ret
= qemu_write_full(fd
, &tmp
, sizeof(tmp
));
785 if (ret
!= sizeof(tmp
)) {
791 /* compose the descriptor */
792 real_filename
= filename
;
793 if ((temp_str
= strrchr(real_filename
, '\\')) != NULL
)
794 real_filename
= temp_str
+ 1;
795 if ((temp_str
= strrchr(real_filename
, '/')) != NULL
)
796 real_filename
= temp_str
+ 1;
797 if ((temp_str
= strrchr(real_filename
, ':')) != NULL
)
798 real_filename
= temp_str
+ 1;
799 snprintf(desc
, sizeof(desc
), desc_template
, (unsigned int)time(NULL
),
800 total_size
, real_filename
,
801 (flags
& BLOCK_FLAG_COMPAT6
? 6 : 4),
802 total_size
/ (int64_t)(63 * 16));
804 /* write the descriptor */
805 lseek(fd
, le64_to_cpu(header
.desc_offset
) << 9, SEEK_SET
);
806 ret
= qemu_write_full(fd
, desc
, strlen(desc
));
807 if (ret
!= strlen(desc
)) {
818 static void vmdk_close(BlockDriverState
*bs
)
820 BDRVVmdkState
*s
= bs
->opaque
;
822 qemu_free(s
->l1_table
);
823 qemu_free(s
->l2_cache
);
826 static void vmdk_flush(BlockDriverState
*bs
)
828 bdrv_flush(bs
->file
);
832 static QEMUOptionParameter vmdk_create_options
[] = {
834 .name
= BLOCK_OPT_SIZE
,
836 .help
= "Virtual disk size"
839 .name
= BLOCK_OPT_BACKING_FILE
,
841 .help
= "File name of a base image"
844 .name
= BLOCK_OPT_COMPAT6
,
846 .help
= "VMDK version 6 image"
851 static BlockDriver bdrv_vmdk
= {
852 .format_name
= "vmdk",
853 .instance_size
= sizeof(BDRVVmdkState
),
854 .bdrv_probe
= vmdk_probe
,
855 .bdrv_open
= vmdk_open
,
856 .bdrv_read
= vmdk_read
,
857 .bdrv_write
= vmdk_write
,
858 .bdrv_close
= vmdk_close
,
859 .bdrv_create
= vmdk_create
,
860 .bdrv_flush
= vmdk_flush
,
861 .bdrv_is_allocated
= vmdk_is_allocated
,
863 .create_options
= vmdk_create_options
,
866 static void bdrv_vmdk_init(void)
868 bdrv_register(&bdrv_vmdk
);
871 block_init(bdrv_vmdk_init
);