/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"
static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};
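
/**
 * Probe function for image format detection
 *
 * Returns a strong score (100) when the buffer begins with the QED magic,
 * 0 otherwise, following the block layer's probe scoring convention.
 */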
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}
static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
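
/* Synchronously rewrite the header, e.g. when clearing QED_F_NEED_CHECK on a
 * clean close.  The callback-based variant used on the I/O path is
 * qed_write_header() below.
 */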
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}
/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */
    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_preadv(s->bs->file, 0, &qiov);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_pwritev(s->bs->file, 0, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    cb(opaque, ret);
}
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}
/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}
/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
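
/* The asynchronous I/O path below is written in continuation-passing style:
 * each step performs one table update or data transfer and then hands off to
 * a completion callback, with qed_aio_next_io() advancing the request one
 * contiguous cluster run at a time.
 */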
static void qed_aio_next_io(QEDAIOCB *acb, int ret);

static void qed_aio_start_io(QEDAIOCB *acb)
{
    qed_aio_next_io(acb, 0);
}

static void qed_aio_next_io_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    qed_aio_next_io(acb, ret);
}
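
/* Allocating write requests are serialized via s->allocating_write_reqs.
 * "Plugging" the queue holds back the next allocating write, e.g. while the
 * header is rewritten to clear QED_F_NEED_CHECK.
 */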
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_start_io(acb);
    }
}
static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}
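
/* The need-check timer fires once allocating writes have been idle for
 * QED_NEED_CHECK_TIMEOUT seconds.  It flushes, clears QED_F_NEED_CHECK, and
 * rewrites the header so that a later crash does not force a consistency
 * check on the next open.
 */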
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_acquire(s);
    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
    qed_release(s);
}
void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended for
     * migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}
/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}
static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}
static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}
static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}
static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }
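
    /* Worked example, assuming 64 KiB clusters and table_size = 4:
     *   table_nelems = (65536 * 4) / 8   = 32768
     *   l2_shift     = ctz32(65536)      = 16  (offset bits within a cluster)
     *   l2_mask      = 32768 - 1         = 0x7fff
     *   l1_shift     = 16 + ctz32(32768) = 31  (one L1 entry spans 2 GiB)
     */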
    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

    return 0;

out:
    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
    return ret;
}
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    return bdrv_qed_do_open(bs, options, flags, errp);
}
static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}
/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
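
/* qed_create() below lays out a minimal image: the header in cluster 0, the
 * backing filename string (if any) immediately after the header struct, and
 * a zeroed L1 table at l1_table_offset (the second cluster).
 *
 * Hypothetical command-line equivalent:
 *   qemu-img create -f qed -o cluster_size=65536 disk.qed 16G
 */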
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}
static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}
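
/* Context for qed_is_allocated_cb(); collects the block status result so
 * that bdrv_qed_co_get_block_status() can return it synchronously.
 */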
typedef struct {
    BlockDriverState *bs;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;
static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }
}
static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    ret = qed_find_cluster(s, &request, cb.pos, &len, &offset);
    qed_is_allocated_cb(&cb, ret, offset, len);

    /* The callback was invoked immediately */
    assert(cb.status != BDRV_BLOCK_OFFSET_MASK);

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}
static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}
/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                 QEMUIOVector *qiov,
                                 QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_preadv(s->bs->backing, pos, *backing_qiov);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                      uint64_t len, uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_aio_unref(acb);

    /* Invoke callback */
    qed_acquire(s);
    cb(user_opaque, ret);
    qed_release(s);
}
static void qed_resume_alloc_bh(void *opaque)
{
    qed_aio_start_io(opaque);
}
static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            qed_aio_complete_bh, acb);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QEDAIOCB *next_acb;
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        next_acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (next_acb) {
            aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                                    qed_resume_alloc_bh, next_acb);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}
/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(acb, ret);
}
/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}
/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io_cb, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}
static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}
/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}
/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io_cb;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}
/**
 * Populate untouched regions of new data cluster
 */
static void qed_aio_write_cow(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);

    qed_aio_write_main(acb, ret);
}
/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
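
/* Completion callback for the header update (if any) before a zero write to
 * an unallocated cluster; links the zero cluster marker (offset 1) into the
 * L2 table.
 */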
static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_start_io(acb);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_cow;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}
/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}
/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              QED_CLUSTER_ZERO or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}
/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              QED_CLUSTER_ZERO or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_start_io(acb);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        ret = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                    &acb->backing_qiov);
        qed_aio_next_io(acb, ret);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io_cb, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}
/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;
    uint64_t offset;
    size_t len;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    len = acb->end_pos - acb->cur_pos;
    ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
    io_fn(acb, ret, offset, len);
}
static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_start_io(acb);
    return &acb->common;
}
static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}
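
/* bdrv_qed_co_pwrite_zeroes() runs in coroutine context but reuses the
 * callback-based AIO path; this context lets the completion callback wake
 * the waiting coroutine.
 */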
typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        aio_co_wake(cb->co);
    }
}
static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                             count >> BDRV_SECTOR_BITS,
                             qed_co_pwrite_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}
static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}
static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}
static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}
static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);