/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/option.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
static QemuOptsList qed_create_opts;
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
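
/* Note: probe scores follow the usual block driver convention, 100 for a
 * confident match on the QED magic and 0 when the buffer is too short or
 * the magic does not match.
 */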
/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}
static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
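
/* Note: all multi-byte header fields are stored little-endian on disk; the
 * two conversion helpers above are exact mirrors of each other.
 */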
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}
/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */
    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);

    ret = bdrv_co_pread(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwrite(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
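
/* Worked example, assuming the defaults from qed.h (64 KiB clusters and
 * table_size 4): table_entries = (4 * 65536) / 8 = 32768 entries, so one L2
 * table addresses 32768 * 65536 bytes = 2 GiB, and the maximum image size
 * is 32768 * 2 GiB = 64 TiB.
 */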
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}
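
/* Note: bdrv_qed_do_open() below uses this helper to read the backing
 * filename, which lives in the extra data area after the fixed header at
 * header.backing_filename_offset.
 */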
/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}
/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
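
/* Note: the new table is zeroed only in memory here; nothing reaches the
 * image file until qed_write_l2_table() writes it out, e.g. from
 * qed_aio_write_l2_update() below.
 */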
static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed.  */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently.  This cannot happen
         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}
static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}
static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}
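
/* Note: the need-check timer exists so that QED_F_NEED_CHECK does not stay
 * set forever on an idle image. Once no allocating write has run for the
 * timeout period, the entry above flushes outstanding writes, clears the
 * bit, and rewrites the header, so a later crash no longer forces a
 * consistency check on the next open.
 */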
static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}
/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}
static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}
static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}
static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}
static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}
/* Called with table_lock held.  */
static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
                                         int flags, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        error_setg(errp, "Failed to read QED header");
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        error_setg(errp, "QED image has an invalid cluster size");
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        error_setg(errp, "Failed to get file length");
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        error_setg(errp, "QED image has an invalid table size");
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "QED image has an invalid image size");
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        error_setg(errp, "QED image has an invalid L1 table offset");
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        error_setg(errp, "QED header size is too large");
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            error_setg(errp, "QED backing filename is invalid");
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              bs->auto_backing_file,
                              sizeof(bs->auto_backing_file));
        if (ret < 0) {
            error_setg(errp, "Failed to read backing filename");
            return ret;
        }
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->auto_backing_file);

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            error_setg(errp, "Failed to update header");
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        error_setg(errp, "Failed to read L1 table");
        goto fail;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                error_setg(errp, "Image corrupted");
                goto fail;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));
    return 0;

fail:
    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
    return ret;
}
typedef struct QEDOpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QEDOpenCo;

static void coroutine_fn bdrv_qed_open_entry(void *opaque)
{
    QEDOpenCo *qoc = opaque;
    BDRVQEDState *s = qoc->bs->opaque;

    qemu_co_mutex_lock(&s->table_lock);
    qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->table_lock);
}
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    QEDOpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        bdrv_qed_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    return qoc.ret;
}
static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}
/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
                                           Error **errp)
{
    BlockdevCreateOptionsQed *qed_opts;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    QEDHeader header;
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size;
    int ret = 0;

    assert(opts->driver == BLOCKDEV_DRIVER_QED);
    qed_opts = &opts->u.qed;

    /* Validate options and set default values */
    if (!qed_opts->has_cluster_size) {
        qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    }
    if (!qed_opts->has_table_size) {
        qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
    }

    if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(qed_opts->table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
                                 qed_opts->table_size))
    {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(qed_opts->cluster_size,
                                      qed_opts->table_size));
        return -EINVAL;
    }

    /* Create BlockBackend to write to the image */
    bs = bdrv_open_blockdev_ref(qed_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        goto out;
    }
    blk_set_allow_write_beyond_eof(blk, true);

    /* Prepare image format */
    header = (QEDHeader) {
        .magic = QED_MAGIC,
        .cluster_size = qed_opts->cluster_size,
        .table_size = qed_opts->table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = qed_opts->cluster_size,
        .image_size = qed_opts->size,
    };

    l1_size = header.cluster_size * header.table_size;

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
    if (ret < 0) {
        goto out;
    }

    if (qed_opts->has_backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(qed_opts->backing_file);

        if (qed_opts->has_backing_fmt) {
            const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
            if (qed_fmt_is_raw(backing_fmt)) {
                header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
            }
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), qed_opts->backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    bdrv_unref(bs);
    return ret;
}
static int coroutine_fn bdrv_qed_co_create_opts(const char *filename,
                                                QemuOpts *opts,
                                                Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict;
    Visitor *v;
    BlockDriverState *bs = NULL;
    Error *local_err = NULL;
    int ret;

    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE,       "backing-file" },
        { BLOCK_OPT_BACKING_FMT,        "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE,       "cluster-size" },
        { BLOCK_OPT_TABLE_SIZE,         "table-size" },
        { NULL, NULL },
    };

    /* Parse options and convert legacy syntax */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    bs = bdrv_open(filename, NULL, NULL,
                   BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto fail;
    }

    /* Now get the QAPI type BlockdevCreateOptions */
    qdict_put_str(qdict, "driver", "qed");
    qdict_put_str(qdict, "file", bs->node_name);

    v = qobject_input_visitor_new_flat_confused(qdict, errp);
    if (!v) {
        ret = -EINVAL;
        goto fail;
    }

    visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
    visit_free(v);

    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Silently round up size */
    assert(create_options->driver == BLOCKDEV_DRIVER_QED);
    create_options->u.qed.size =
        ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);

    /* Create the qed image (format layer) */
    ret = bdrv_qed_co_create(create_options, errp);

fail:
    qobject_unref(qdict);
    bdrv_unref(bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}
static int coroutine_fn bdrv_qed_co_block_status(BlockDriverState *bs,
                                                 bool want_zero,
                                                 int64_t pos, int64_t bytes,
                                                 int64_t *pnum, int64_t *map,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}
static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}
/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov,
                                              QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(qemu_iovec_buf(&qiov));
    return ret;
}
/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
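
/* Illustrative example (invented numbers): with 64 KiB clusters, linking
 * n = 3 clusters starting at cluster = 0x100000 stores the offsets
 * 0x100000, 0x110000, 0x120000 in consecutive entries. For the zero or
 * unallocated markers the same value is stored n times, since those markers
 * are not byte offsets and must not be advanced by cluster_size.
 */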
/* Called with table_lock held.  */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}
/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}
/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}
/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}
/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use.  A crash
         * during an allocating write could result in empty clusters in the
         * image.  If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region.  The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}
/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
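
/* Note: this reflects the two crash-consistency strategies in this driver.
 * With a backing file, qed_aio_write_cow() flushes new data clusters before
 * the L2 update, so the image stays consistent without the dirty bit.
 * Without a backing file, allocating writes instead set QED_F_NEED_CHECK
 * once and rely on qed_check() at open time to repair an unclean shutdown.
 */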
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}
/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
        }
        memset(iov->iov_base, 0, iov->iov_len);
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write.  */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}
/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}
/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                  &acb->backing_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}
/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}
static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs         = bs,
        .cur_pos    = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos    = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov       = qiov,
        .flags      = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}
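
/* Note: the QEDAIOCB lives on this coroutine's stack; qed_aio_next_io()
 * runs the request to completion before returning, so the request itself
 * needs no heap allocation or reference counting.
 */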
static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}
static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov, int flags)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}
static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;

    /*
     * Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}
static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs,
                                             int64_t offset,
                                             PreallocMode prealloc,
                                             Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}
static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}
static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    return 0;
}
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs,
                                                      Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    qemu_co_mutex_lock(&s->table_lock);
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    qemu_co_mutex_unlock(&s->table_lock);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}
static int coroutine_fn bdrv_qed_co_check(BlockDriverState *bs,
                                          BdrvCheckResult *result,
                                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_check(s, result, !!fix);
    qemu_co_mutex_unlock(&s->table_lock);

    return ret;
}
static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
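
/* Illustrative usage of the options above via qemu-img (an example command
 * line, not taken from this file; assumes the standard BLOCK_OPT_* option
 * names "cluster_size", "backing_file" and "backing_fmt"):
 *
 *   qemu-img create -f qed \
 *       -o cluster_size=65536,backing_file=base.img,backing_fmt=raw \
 *       overlay.qed 16G
 *
 * The positional size argument becomes qed_opts->size and is rounded up to
 * sector granularity by bdrv_qed_co_create_opts() above.
 */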
static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_co_create           = bdrv_qed_co_create,
    .bdrv_co_create_opts      = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_block_status     = bdrv_qed_co_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_co_truncate         = bdrv_qed_co_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
    .bdrv_co_check            = bdrv_qed_co_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);