/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/option.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
static QemuOptsList qed_create_opts;

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
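
/* Knowing that the backing format is raw matters for safety: a raw backing
 * image is never probed on open (see QED_F_BACKING_FORMAT_NO_PROBE below),
 * because its content is guest-controlled and could otherwise be crafted to
 * look like a different image format.
 */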
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}
/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */
    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwritev(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}
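
/* Note on sizing: QEDHeader is a 64-byte packed struct (see qed.h), so with
 * 512-byte sectors nsectors is 1 and the read-modify-write above touches only
 * the first sector of the file, preserving whatever follows the 64 header
 * bytes (typically the backing filename string and any unknown compat data).
 */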
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
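
/* Worked example, assuming the defaults from qed.h (64 KiB clusters,
 * table_size = 4): a table is 4 * 65536 bytes = 32768 eight-byte entries, one
 * L2 table maps 32768 * 64 KiB = 2 GiB, so a full L1 table of 32768 L2 tables
 * maps at most 32768 * 2 GiB = 64 TiB of virtual disk.
 */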
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}
/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
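
/* This is a pure bump allocator: QED keeps no free list, so clusters are only
 * ever appended to the image file.  A crash between allocating a cluster and
 * linking it into a table at worst leaks file space, which is one reason the
 * driver pairs allocating writes with QED_F_NEED_CHECK and qed_check().
 */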
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed.  */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently.  This cannot happen
         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}
static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}
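
/* Taken together, the timer functions implement lazy clearing of
 * QED_F_NEED_CHECK: the flag is set before the first allocating write and only
 * cleared after the image has been quiescent for QED_NEED_CHECK_TIMEOUT
 * seconds.  Plugging allocating writes while the header is rewritten keeps the
 * flag consistent with any in-flight table updates.
 */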
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}
static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}
static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}
/* Called with table_lock held.  */
static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
                                         int flags, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}
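
/* The shift/mask fields computed above decompose a virtual offset roughly as
 * follows (the actual index helpers, such as qed_l1_index() used below, live
 * in qed.h):
 *
 *   l1 index        = offset >> l1_shift
 *   l2 index        = (offset >> l2_shift) & l2_mask
 *   byte in cluster = offset & (cluster_size - 1)
 *
 * where l1_shift = l2_shift + ctz32(table_nelems).
 */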
typedef struct QEDOpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QEDOpenCo;

static void coroutine_fn bdrv_qed_open_entry(void *opaque)
{
    QEDOpenCo *qoc = opaque;
    BDRVQEDState *s = qoc->bs->opaque;

    qemu_co_mutex_lock(&s->table_lock);
    qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->table_lock);
}
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    QEDOpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        bdrv_qed_open_entry(&qoc);
    } else {
        qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    return qoc.ret;
}
static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
                                           Error **errp)
{
    BlockdevCreateOptionsQed *qed_opts;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    QEDHeader header;
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size;
    int ret = 0;

    assert(opts->driver == BLOCKDEV_DRIVER_QED);
    qed_opts = &opts->u.qed;

    /* Validate options and set default values */
    if (!qed_opts->has_cluster_size) {
        qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    }
    if (!qed_opts->has_table_size) {
        qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
    }

    if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(qed_opts->table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
                                 qed_opts->table_size))
    {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(qed_opts->cluster_size,
                                      qed_opts->table_size));
        return -EINVAL;
    }

    /* Create BlockBackend to write to the image */
    bs = bdrv_open_blockdev_ref(qed_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        goto out;
    }
    blk_set_allow_write_beyond_eof(blk, true);

    /* Prepare image format */
    header = (QEDHeader) {
        .magic = QED_MAGIC,
        .cluster_size = qed_opts->cluster_size,
        .table_size = qed_opts->table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = qed_opts->cluster_size,
        .image_size = qed_opts->size,
    };

    l1_size = header.cluster_size * header.table_size;

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
    if (ret < 0) {
        goto out;
    }

    if (qed_opts->has_backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(qed_opts->backing_file);

        if (qed_opts->has_backing_fmt) {
            const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
            if (qed_fmt_is_raw(backing_fmt)) {
                header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
            }
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), qed_opts->backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    bdrv_unref(bs);
    return ret;
}
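
/* The resulting on-disk layout: cluster 0 holds the 64-byte header followed
 * immediately by the optional backing filename (header_size is 1 cluster);
 * the zeroed L1 table starts at cluster 1 (l1_table_offset == cluster_size)
 * and spans table_size clusters.  L2 tables and data clusters are appended
 * lazily as the image is written.
 */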
static int coroutine_fn bdrv_qed_co_create_opts(const char *filename,
                                                QemuOpts *opts,
                                                Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict = NULL;
    QObject *qobj;
    Visitor *v;
    BlockDriverState *bs = NULL;
    Error *local_err = NULL;
    int ret;

    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE,       "backing-file" },
        { BLOCK_OPT_BACKING_FMT,        "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE,       "cluster-size" },
        { BLOCK_OPT_TABLE_SIZE,         "table-size" },
        { NULL, NULL },
    };

    /* Parse options and convert legacy syntax */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    bs = bdrv_open(filename, NULL, NULL,
                   BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto fail;
    }

    /* Now get the QAPI type BlockdevCreateOptions */
    qdict_put_str(qdict, "driver", "qed");
    qdict_put_str(qdict, "file", bs->node_name);

    qobj = qdict_crumple(qdict, errp);
    qobject_unref(qdict);
    qdict = qobject_to(QDict, qobj);
    if (qdict == NULL) {
        ret = -EINVAL;
        goto fail;
    }

    v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
    visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
    visit_free(v);

    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Silently round up size */
    assert(create_options->driver == BLOCKDEV_DRIVER_QED);
    create_options->u.qed.size =
        ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);

    /* Create the qed image (format layer) */
    ret = bdrv_qed_co_create(create_options, errp);

fail:
    qobject_unref(qdict);
    bdrv_unref(bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}
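
/* For reference, this is the path taken by the legacy creation syntax, e.g.
 *
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 disk.qed 10G
 *
 * The key=value options are renamed to their QAPI spellings above and then
 * visited into a BlockdevCreateOptions for bdrv_qed_co_create().
 */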
static int coroutine_fn bdrv_qed_co_block_status(BlockDriverState *bs,
                                                 bool want_zero,
                                                 int64_t pos, int64_t bytes,
                                                 int64_t *pnum, int64_t *map,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}
static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}
/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov,
                                              QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
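
/* Example: linking three clusters allocated at file offset 0x100000 with
 * 64 KiB clusters stores 0x100000, 0x110000 and 0x120000 in consecutive
 * entries.  The zero and unallocated markers are not file offsets, so they
 * are stored unchanged in every entry instead of being incremented.
 */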
/* Called with table_lock held.  */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}
/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}
/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}
/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use.  A crash
         * during an allocating write could result in empty clusters in the
         * image.  If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region.  The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}
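
/* Copy-on-write layout of a newly allocated cluster range:
 *
 *   |<-- prefill -->|<---- guest write ---->|<-- postfill -->|
 *   cluster start   cur_pos                 cur_pos + size   cluster end
 *
 * The prefill and postfill regions come from the backing file (or are zeroed
 * when there is none) so that the whole cluster holds valid data before it is
 * linked into the L2 table.
 */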
/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}
/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write.  */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}
/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_ZERO, QED_CLUSTER_L2 or
 *              QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}
/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_ZERO, QED_CLUSTER_L2 or
 *              QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                  &acb->backing_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}
/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}
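
/* The -EAGAIN handling above is subtle: qed_aio_write_alloc() returns -EAGAIN
 * after parking a request behind an in-flight allocating write, before any
 * progress is recorded in cur_qiov.  The loop then re-runs qed_find_cluster()
 * for the same position, so the woken request sees any table updates the
 * previous writer made in the meantime.
 */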
static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}
static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov, int flags)
{
    assert(!flags);
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}
static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = bytes;

    qemu_iovec_init_external(&qiov, &iov, 1);
    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset,
                             PreallocMode prealloc, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}
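
/* Growing the image is metadata-only: no data clusters are allocated here.
 * Reads of the new region see zeroes (or backing file data) and clusters are
 * allocated lazily on first write, as with any other unallocated range.
 */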
static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    return 0;
}
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs,
                                                      Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    qemu_co_mutex_lock(&s->table_lock);
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    qemu_co_mutex_unlock(&s->table_lock);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}
static int bdrv_qed_co_check(BlockDriverState *bs, BdrvCheckResult *result,
                             BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_check(s, result, !!fix);
    qemu_co_mutex_unlock(&s->table_lock);

    return ret;
}
static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_co_create           = bdrv_qed_co_create,
    .bdrv_co_create_opts      = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_block_status     = bdrv_qed_co_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
    .bdrv_co_check            = bdrv_qed_co_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);