/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"

static QemuOptsList qed_create_opts;

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
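
/*
 * Note (added for clarity): raw backing files are flagged with
 * QED_F_BACKING_FORMAT_NO_PROBE at creation time (see below) so that the
 * backing file is never format-probed on open.  Probing a raw image is
 * unsafe because guest-written data could mimic another format's header.
 */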

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;

    qed_header_cpu_to_le(&s->header, &le);
    return bdrv_pwrite(s->bs->file, 0, sizeof(le), &le, 0);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);

    ret = bdrv_co_pread(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwrite(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
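
/*
 * Worked example (added note, using the defaults defined in qed.h): with a
 * 64 KiB cluster size and a table size of 4 clusters, each table holds
 * 4 * 65536 / 8 = 32768 entries.  One L2 table then maps
 * 32768 * 64 KiB = 2 GiB, and a full L1 table of 32768 L2 tables maps
 * 32768 * 2 GiB = 64 TiB, the maximum image size with default settings.
 */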

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, n, buf, 0);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
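
/*
 * Added note: allocation is nothing more than bumping the in-memory
 * end-of-file pointer; the clusters only become reachable once a table
 * entry referencing them is written out.  Crash consistency for
 * half-finished allocating writes is provided by the QED_F_NEED_CHECK
 * dirty flag rather than by journaling these offsets.
 */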

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed.  */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently.  This cannot happen
         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}
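
/*
 * Added note: this is the "lazy clean" half of the dirty-bit protocol.
 * Once no allocating write has run for QED_NEED_CHECK_TIMEOUT seconds, all
 * data is flushed and QED_F_NEED_CHECK is cleared so the next open does not
 * need a consistency check.  Errors are deliberately ignored here; the flag
 * simply stays set and the image is checked on the next open instead.
 */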

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

/* Called with table_lock held.  */
static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
                                         int flags, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_co_pread(bs->file, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to read QED header");
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        error_setg(errp, "QED cluster size is invalid");
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        error_setg(errp, "Failed to get file length");
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        error_setg(errp, "QED table size is invalid");
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "QED image size is invalid");
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        error_setg(errp, "QED table offset is invalid");
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
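
    /*
     * Illustrative note (added): with default 64 KiB clusters and
     * table_size 4, table_nelems is 32768, so l2_shift = 16,
     * l2_mask = 0x7fff and l1_shift = 31.  A virtual offset 'pos' then
     * decomposes as:
     *
     *   L1 index          = pos >> l1_shift
     *   L2 index          = (pos >> l2_shift) & l2_mask
     *   offset in cluster = pos & (cluster_size - 1)
     */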

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        error_setg(errp, "QED header size is too large");
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        g_autofree char *backing_file_str = NULL;

        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            error_setg(errp, "QED backing filename offset is invalid");
            return -EINVAL;
        }

        backing_file_str = g_malloc(sizeof(bs->backing_file));
        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              backing_file_str, sizeof(bs->backing_file));
        if (ret < 0) {
            error_setg(errp, "Failed to read backing filename");
            return ret;
        }

        if (!g_str_equal(backing_file_str, bs->backing_file)) {
            pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                    backing_file_str);
            pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
                    backing_file_str);
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            error_setg(errp, "Failed to update header");
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_co_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        error_setg(errp, "Failed to read L1 table");
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                error_setg(errp, "Image corrupted");
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

typedef struct QEDOpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QEDOpenCo;

static void coroutine_fn bdrv_qed_open_entry(void *opaque)
{
    QEDOpenCo *qoc = opaque;
    BDRVQEDState *s = qoc->bs->opaque;

    qemu_co_mutex_lock(&s->table_lock);
    qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->table_lock);
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    QEDOpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };
    int ret;

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        bdrv_qed_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    return qoc.ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
    bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size);
}

/* We have nothing to do for QED reopen, stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
                                           Error **errp)
{
    BlockdevCreateOptionsQed *qed_opts;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    QEDHeader header;
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size;
    int ret = 0;

    assert(opts->driver == BLOCKDEV_DRIVER_QED);
    qed_opts = &opts->u.qed;

    /* Validate options and set default values */
    if (!qed_opts->has_cluster_size) {
        qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    }
    if (!qed_opts->has_table_size) {
        qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
    }

    if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(qed_opts->table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
                                 qed_opts->table_size))
    {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(qed_opts->cluster_size,
                                      qed_opts->table_size));
        return -EINVAL;
    }

    /* Create BlockBackend to write to the image */
    bs = bdrv_open_blockdev_ref(qed_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
                          errp);
    if (!blk) {
        ret = -EPERM;
        goto out;
    }
    blk_set_allow_write_beyond_eof(blk, true);

    /* Prepare image format */
    header = (QEDHeader) {
        .magic = QED_MAGIC,
        .cluster_size = qed_opts->cluster_size,
        .table_size = qed_opts->table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = qed_opts->cluster_size,
        .image_size = qed_opts->size,
    };

    l1_size = header.cluster_size * header.table_size;
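
    /*
     * Added note on the resulting on-disk layout: cluster 0 holds the
     * header (header_size = 1 cluster), with the backing filename stored
     * immediately after the fixed header struct; the L1 table starts at
     * l1_table_offset (the second cluster) and spans table_size clusters.
     */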

    /*
     * The QED format associates file length with allocation status,
     * so a new file (which is empty) must have a length of 0.
     */
    ret = blk_co_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (qed_opts->has_backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(qed_opts->backing_file);

        if (qed_opts->has_backing_fmt) {
            const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
            if (qed_fmt_is_raw(backing_fmt)) {
                header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
            }
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_co_pwrite(blk, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_co_pwrite(blk, sizeof(le_header), header.backing_filename_size,
                        qed_opts->backing_file, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_co_pwrite(blk, header.l1_table_offset, l1_size, l1_table, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    bdrv_unref(bs);
    return ret;
}
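
/*
 * Usage sketch (added, not part of the original file): this entry point
 * backs the QMP blockdev-create job, e.g. something along the lines of
 *
 *   { "execute": "blockdev-create",
 *     "arguments": { "job-id": "create0",
 *                    "options": { "driver": "qed",
 *                                 "file": "proto-node",
 *                                 "size": 10737418240 } } }
 *
 * with cluster-size and table-size as optional extra keys.
 */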

static int coroutine_fn bdrv_qed_co_create_opts(BlockDriver *drv,
                                                const char *filename,
                                                QemuOpts *opts,
                                                Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict;
    Visitor *v;
    BlockDriverState *bs = NULL;
    int ret;

    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE,       "backing-file" },
        { BLOCK_OPT_BACKING_FMT,        "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE,       "cluster-size" },
        { BLOCK_OPT_TABLE_SIZE,         "table-size" },
        { NULL, NULL },
    };

    /* Parse options and convert legacy syntax */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_create_file(filename, opts, errp);
    if (ret < 0) {
        goto fail;
    }

    bs = bdrv_open(filename, NULL, NULL,
                   BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto fail;
    }

    /* Now get the QAPI type BlockdevCreateOptions */
    qdict_put_str(qdict, "driver", "qed");
    qdict_put_str(qdict, "file", bs->node_name);

    v = qobject_input_visitor_new_flat_confused(qdict, errp);
    if (!v) {
        ret = -EINVAL;
        goto fail;
    }

    visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
    visit_free(v);
    if (!create_options) {
        ret = -EINVAL;
        goto fail;
    }

    /* Silently round up size */
    assert(create_options->driver == BLOCKDEV_DRIVER_QED);
    create_options->u.qed.size =
        ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);

    /* Create the qed image (format layer) */
    ret = bdrv_qed_co_create(create_options, errp);

fail:
    qobject_unref(qdict);
    bdrv_unref(bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}

static int coroutine_fn bdrv_qed_co_block_status(BlockDriverState *bs,
                                                 bool want_zero,
                                                 int64_t pos, int64_t bytes,
                                                 int64_t *pnum, int64_t *map,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov)
{
    if (s->bs->backing) {
        BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
        return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0);
    }
    qemu_iovec_memset(qiov, 0, 0, qiov->size);
    return 0;
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);

    ret = qed_read_backing_file(s, pos, &qiov);

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(qemu_iovec_buf(&qiov));
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
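
/*
 * Added example: linking three contiguous data clusters starting at file
 * offset X stores X, X + cluster_size and X + 2 * cluster_size in
 * consecutive entries, while the special zero/unallocated markers are
 * repeated verbatim since they are not real file offsets.
 */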

/* Called with table_lock held.  */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}

/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use.  A crash
         * during an allocating write could result in empty clusters in the
         * image.  If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region.  The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
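
/*
 * Added note on the dirty-bit protocol: QED_F_NEED_CHECK is set (and the
 * header written) before the first allocating write after a clean period,
 * and cleared again either by the need-check timer above or by a clean
 * close.  With a backing file, the flush-before-L2-update path already
 * guarantees consistency, so the bit is not needed there.
 */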

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write.  */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}
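
/*
 * Added note: -EAGAIN is not an error here.  qed_aio_write_alloc() returns
 * it after queuing the request behind another in-flight allocating write;
 * since cur_qiov was left empty, the loop above simply re-runs the cluster
 * lookup for the same position once the request is woken up.
 */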

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov, int flags)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int64_t bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;

    /*
     * Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    /*
     * QED is not prepared for 63bit write-zero requests, so rely on
     * max_pwrite_zeroes.
     */
    assert(bytes <= INT_MAX);

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs,
                                             int64_t offset,
                                             bool exact,
                                             PreallocMode prealloc,
                                             BdrvRequestFlags flags,
                                             Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer_len, buffer, 0);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs,
                                                      Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    qemu_co_mutex_lock(&s->table_lock);
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, errp);
    qemu_co_mutex_unlock(&s->table_lock);
    if (ret < 0) {
        error_prepend(errp, "Could not reopen qed layer: ");
    }
}

static int coroutine_fn bdrv_qed_co_check(BlockDriverState *bs,
                                          BdrvCheckResult *result,
                                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_check(s, result, !!fix);
    qemu_co_mutex_unlock(&s->table_lock);

    return ret;
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
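
/*
 * Usage sketch (added): these options correspond to the legacy qemu-img
 * create syntax, e.g.
 *
 *   qemu-img create -f qed -o cluster_size=65536 disk.qed 10G
 *
 * which bdrv_qed_co_create_opts() translates into BlockdevCreateOptions
 * before calling bdrv_qed_co_create().
 */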

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .is_format                = true,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_default_perms,
    .bdrv_co_create           = bdrv_qed_co_create,
    .bdrv_co_create_opts      = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_block_status     = bdrv_qed_co_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_co_truncate         = bdrv_qed_co_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
    .bdrv_co_check            = bdrv_qed_co_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);