/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"
static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}
static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}
static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}
static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}
/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */
    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
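/* Worked example: with the default 64 KiB cluster_size and table_size of 4,
 * a table holds (4 * 65536) / 8 = 32768 entries, one L2 table maps
 * 32768 * 65536 bytes = 2 GiB, and the maximum image size is
 * 2 GiB * 32768 L1 entries = 64 TiB.
 */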
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
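/* The check above uses the classic bit trick: x & (x - 1) clears the lowest
 * set bit, so the result is zero exactly for powers of two.  For example,
 * 0x10000 & 0x0ffff == 0 (valid), while 0x18000 & 0x17fff == 0x10000
 * (invalid).
 */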
static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}
/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
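/* Note that allocation is append-only: for example, with 64 KiB clusters two
 * successive qed_alloc_clusters(s, 1) calls return offsets file_size and
 * file_size + 65536.  Nothing is written to the image file here; the caller
 * must store data at the returned offset for the allocation to take effect.
 */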
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}
/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
static void qed_aio_next_io(QEDAIOCB *acb, int ret);

static void qed_aio_start_io(QEDAIOCB *acb)
{
    qed_aio_next_io(acb, 0);
}

static void qed_aio_next_io_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    qed_aio_next_io(acb, ret);
}
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}
static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_start_io(acb);
    }
}
static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}
static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_acquire(s);
    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
    qed_release(s);
}
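/* Summary of the need-check clearing protocol driven by this timer: plug new
 * allocating writes, flush the image file, clear QED_F_NEED_CHECK and rewrite
 * the header, issue a final flush, then unplug without waiting for that last
 * flush.  A crash at any point leaves the flag set, so the next open still
 * runs a consistency check.
 */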
void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}
/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}
static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}
static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}
static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}
static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              bs->backing_file, sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }
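    /* Illustrative example: if a newer program had set an autoclear bit this
     * code does not know (say autoclear_features == 0x2 against a known mask
     * of 0x0), the AND above knocks it back to 0x0 on disk, so the newer
     * program can later detect that its feature state was invalidated.
     */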
    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    return bdrv_qed_do_open(bs, options, flags, errp);
}
static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}
/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}
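/* Illustrative invocation: the options parsed by bdrv_qed_create() below
 * correspond to a command line such as
 *
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 disk.qed 10G
 */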
static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}
typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;
static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        aio_co_wake(cb->co);
    }
}
static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors, int *pnum,
                                                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}
static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}
/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
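/* Example of a straddling read: with backing_length == 1 MiB, a 64 KiB read
 * at pos == 992 KiB zero-fills the whole qiov first, then reads only the
 * remaining 32 KiB from the backing file through the shortened backing_qiov.
 */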
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;
static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}
static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}
/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}
/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
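/* Example: index = 5, n = 3, cluster = 0x80000 with 64 KiB clusters stores
 * offsets[5] = 0x80000, offsets[6] = 0x90000, offsets[7] = 0xa0000.  A zero
 * or unallocated cluster marker is instead repeated unchanged for all n
 * entries.
 */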
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_aio_unref(acb);

    /* Invoke callback */
    qed_acquire(s);
    cb(user_opaque, ret);
    qed_release(s);
}
static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            qed_aio_complete_bh, acb);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_start_io(acb);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}
/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(acb, ret);
}
/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}
/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io_cb, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}
static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}
/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}
/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io_cb;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}
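/* Dispatch summary: an in-place write (cluster already allocated) proceeds
 * straight to the next I/O step; an allocating write flushes the new data
 * cluster before the L2 update when a backing file exists, and otherwise
 * updates the L2 table directly.
 */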
/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}
/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}
/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
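/* In other words: without a backing file, the first allocating write after a
 * clean open marks the image dirty; once QED_F_NEED_CHECK is set, subsequent
 * allocating writes skip the extra header rewrite.
 */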
static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_start_io(acb);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}
/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}
/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}
/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_start_io(acb);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io_cb, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io_cb, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}
/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}
static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_start_io(acb);
    return &acb->common;
}
static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}
static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}
typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        aio_co_wake(cb->co);
    }
}
static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }
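    /* Example: with 64 KiB clusters, a 128 KiB zeroing request at offset
     * 192 KiB is handled here, while one at offset 10 KiB returns -ENOTSUP
     * above and falls back to the generic zero-write path.
     */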
    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                             count >> BDRV_SECTOR_BITS,
                             qed_co_pwrite_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}
static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}
static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}
*bs
, BdrvCheckResult
*result
,
1660 BDRVQEDState
*s
= bs
->opaque
;
1662 return qed_check(s
, result
, !!fix
);
static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};
static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);