/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

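/* Begin a quiesced section for all parents of @bs, except @ignore (and, if
 * @ignore_bds_parents is set, all parents that are themselves
 * BlockDriverStates). */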
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    IO_OR_GS_CODE();
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

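/* Quiesce a single BdrvChild; if @poll is true, additionally wait until the
 * parent reports no more in-flight requests for this child. */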
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    IO_OR_GS_CODE();
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

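/* Merge the limits of @src into @dst: alignments and optimal sizes take the
 * larger value, maximum transfer sizes and iovec counts take the smaller
 * non-zero value. */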
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}

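/* Parameters for a drain operation, shared between the requesting coroutine
 * and the bottom half / coroutine that actually executes it (see
 * bdrv_drain_invoke_entry(), bdrv_co_drain_bh_cb() and
 * bdrv_co_yield_to_drain() below). */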
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;
    IO_OR_GS_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

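/* Bottom half that performs the actual drained_begin/end on behalf of a
 * coroutine (or, when bs is NULL, bdrv_drain_all_begin()), then wakes the
 * coroutine that scheduled it. */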
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    IO_OR_GS_CODE();
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.  The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles.  Therefore, the pointer must remain valid
 * until the pointee reaches 0.  That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    IO_CODE();
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;
    IO_OR_GS_CODE();

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;
    IO_OR_GS_CODE();

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;
    GLOBAL_STATE_CODE();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the in-flight
     * I/O requests to finish may never end.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the in-flight
     * I/O requests to finish may never end.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

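/* Return true if the [offset, offset + bytes) range intersects the request's
 * overlap range. */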
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests.  This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
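/*
 * Illustrative example: with a 64 KiB cluster size, offset 70000 / bytes 1000
 * is rounded to cluster_offset 65536 / cluster_bytes 65536 (one whole
 * cluster).
 */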
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

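/* Validate @offset and @bytes against BDRV_MAX_LENGTH and, if a @qiov is
 * given, make sure the request fits into it starting at @qiov_offset.
 * Returns 0 on success, -EIO otherwise. */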
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

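/* Dispatch a read to the most capable driver callback available
 * (.bdrv_co_preadv_part, .bdrv_co_preadv, .bdrv_aio_preadv or the legacy
 * sector-based .bdrv_co_readv), slicing @qiov if the chosen callback cannot
 * take a qiov_offset. */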
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

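/* Counterpart of bdrv_driver_preadv() for writes.  In addition, if the driver
 * does not support BDRV_REQ_FUA natively, the flag is cleared here and the
 * write is followed by an explicit flush (emulate_fua). */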
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset,
                                            BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive.  That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.  Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests.  If this is a deliberate copy-on-read
                 * then we don't want to ignore the error.  Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to the sub-buffer, corresponding to the align-sized
 * chunk around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

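/* Fill @pad with the head and tail padding needed to align the request to
 * bs->bl.request_alignment and allocate the padding buffer.  Returns false if
 * the request is already fully aligned and no padding is needed. */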
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}

static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child,
                                              BdrvTrackedRequest *req,
                                              BdrvRequestPadding *pad,
                                              bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}

/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_destroy(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}

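/* Write zeroes to [offset, offset + bytes).  The loop shortens unaligned
 * head/tail pieces, tries the driver's .bdrv_co_pwrite_zeroes for each chunk,
 * and falls back to writing a zeroed bounce buffer when that is unsupported
 * (unless BDRV_REQ_NO_FALLBACK forbids it); a single flush at the end
 * emulates BDRV_REQ_FUA if the driver cannot provide it. */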
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}

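/* Check permissions and serialisation requirements before an aligned write,
 * discard or truncate request on @bs proceeds.  Returns 0 if the request may
 * go ahead, a negative errno otherwise. */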
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}

2048 * Forwards an already correctly aligned write request to the BlockDriver,
2049 * after possibly fragmenting it.
2051 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
2052 BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
2053 int64_t align, QEMUIOVector *qiov, size_t qiov_offset,
2054 BdrvRequestFlags flags)
2056 BlockDriverState *bs = child->bs;
2057 BlockDriver *drv = bs->drv;
2058 int ret;
2060 int64_t bytes_remaining = bytes;
2061 int max_transfer;
2063 bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2065 if (!drv) {
2066 return -ENOMEDIUM;
2069 if (bdrv_has_readonly_bitmaps(bs)) {
2070 return -EPERM;
2073 assert(is_power_of_2(align));
2074 assert((offset & (align - 1)) == 0);
2075 assert((bytes & (align - 1)) == 0);
2076 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
2077 align);
2079 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
2081 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
2082 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
2083 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
2084 flags |= BDRV_REQ_ZERO_WRITE;
2085 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
2086 flags |= BDRV_REQ_MAY_UNMAP;
2090 if (ret < 0) {
2091 /* Do nothing, write notifier decided to fail this request */
2092 } else if (flags & BDRV_REQ_ZERO_WRITE) {
2093 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
2094 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
2095 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
2096 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
2097 qiov, qiov_offset);
2098 } else if (bytes <= max_transfer) {
2099 bdrv_debug_event(bs, BLKDBG_PWRITEV);
2100 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
2101 } else {
2102 bdrv_debug_event(bs, BLKDBG_PWRITEV);
2103 while (bytes_remaining) {
2104 int num = MIN(bytes_remaining, max_transfer);
2105 int local_flags = flags;
2107 assert(num);
2108 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
2109 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
2110 /* If FUA is going to be emulated by flush, we only
2111 * need to flush on the last iteration */
2112 local_flags &= ~BDRV_REQ_FUA;
2115 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2116 num, qiov,
2117 qiov_offset + bytes - bytes_remaining,
2118 local_flags);
2119 if (ret < 0) {
2120 break;
2122 bytes_remaining -= num;
2125 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
2127 if (ret >= 0) {
2128 ret = 0;
2130 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
2132 return ret;
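/*
 * Worked example of the fragmentation above (illustrative only; the numbers
 * are hypothetical): with max_transfer of 1 MiB and a 2.5 MiB write at
 * offset 0, the loop issues three driver calls covering [0, 1 MiB),
 * [1 MiB, 2 MiB) and [2 MiB, 2.5 MiB). If the caller requested BDRV_REQ_FUA
 * but the driver does not support it natively, the flag is dropped from all
 * but the final fragment, so the emulating flush happens only once, after
 * the last sub-write.
 */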
2135 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
2136 int64_t offset,
2137 int64_t bytes,
2138 BdrvRequestFlags flags,
2139 BdrvTrackedRequest *req)
2141 BlockDriverState *bs = child->bs;
2142 QEMUIOVector local_qiov;
2143 uint64_t align = bs->bl.request_alignment;
2144 int ret = 0;
2145 bool padding;
2146 BdrvRequestPadding pad;
2148 /* This flag doesn't make sense for padding or zero writes */
2149 flags &= ~BDRV_REQ_REGISTERED_BUF;
2151 padding = bdrv_init_padding(bs, offset, bytes, &pad);
2152 if (padding) {
2153 assert(!(flags & BDRV_REQ_NO_WAIT));
2154 bdrv_make_request_serialising(req, align);
2156 bdrv_padding_rmw_read(child, req, &pad, true);
2158 if (pad.head || pad.merge_reads) {
2159 int64_t aligned_offset = offset & ~(align - 1);
2160 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2162 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2163 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2164 align, &local_qiov, 0,
2165 flags & ~BDRV_REQ_ZERO_WRITE);
2166 if (ret < 0 || pad.merge_reads) {
2167 /* Error or all work is done */
2168 goto out;
2170 offset += write_bytes - pad.head;
2171 bytes -= write_bytes - pad.head;
2175 assert(!bytes || (offset & (align - 1)) == 0);
2176 if (bytes >= align) {
2177 /* Write the aligned part in the middle. */
2178 int64_t aligned_bytes = bytes & ~(align - 1);
2179 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2180 NULL, 0, flags);
2181 if (ret < 0) {
2182 goto out;
2184 bytes -= aligned_bytes;
2185 offset += aligned_bytes;
2188 assert(!bytes || (offset & (align - 1)) == 0);
2189 if (bytes) {
2190 assert(align == pad.tail + bytes);
2192 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2193 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2194 &local_qiov, 0,
2195 flags & ~BDRV_REQ_ZERO_WRITE);
2198 out:
2199 bdrv_padding_destroy(&pad);
2201 return ret;
2205 * Handle a write request in coroutine context
2207 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2208 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2209 BdrvRequestFlags flags)
2211 IO_CODE();
2212 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
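/*
 * Minimal usage sketch (not part of this file's logic; 'child' and 'buf'
 * are hypothetical, and the call must run in coroutine context):
 *
 *     char buf[4096];
 *     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
 *     int ret = bdrv_co_pwritev(child, 0, sizeof(buf), &qiov, 0);
 *     if (ret < 0) {
 *         ... handle -errno ...
 *     }
 */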
2215 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2216 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2217 BdrvRequestFlags flags)
2219 BlockDriverState *bs = child->bs;
2220 BdrvTrackedRequest req;
2221 uint64_t align = bs->bl.request_alignment;
2222 BdrvRequestPadding pad;
2223 int ret;
2224 bool padded = false;
2225 IO_CODE();
2227 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2229 if (!bdrv_is_inserted(bs)) {
2230 return -ENOMEDIUM;
2233 if (flags & BDRV_REQ_ZERO_WRITE) {
2234 ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
2235 } else {
2236 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2238 if (ret < 0) {
2239 return ret;
2242 /* If the request is misaligned then we can't make it efficient */
2243 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2244 !QEMU_IS_ALIGNED(offset | bytes, align))
2246 return -ENOTSUP;
2249 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2251          * Aligning a zero-length request is pointless. Even if the driver assigns
2252          * special meaning to zero length (like qcow2_co_pwritev_compressed_part),
2253          * we can't pass such a request to the driver because of request_alignment.
2255          * Still, there is no reason to return an error if someone occasionally
2256          * does an unaligned zero-length write.
2258 return 0;
2261 if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2263 * Pad request for following read-modify-write cycle.
2264 * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
2265 * alignment only if there is no ZERO flag.
2267 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
2268 &padded, &flags);
2269 if (ret < 0) {
2270 return ret;
2274 bdrv_inc_in_flight(bs);
2275 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2277 if (flags & BDRV_REQ_ZERO_WRITE) {
2278 assert(!padded);
2279 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2280 goto out;
2283 if (padded) {
2285 * Request was unaligned to request_alignment and therefore
2286 * padded. We are going to do read-modify-write, and must
2287 * serialize the request to prevent interactions of the
2288 * widened region with other transactions.
2290 assert(!(flags & BDRV_REQ_NO_WAIT));
2291 bdrv_make_request_serialising(&req, align);
2292 bdrv_padding_rmw_read(child, &req, &pad, false);
2295 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2296 qiov, qiov_offset, flags);
2298 bdrv_padding_destroy(&pad);
2300 out:
2301 tracked_request_end(&req);
2302 bdrv_dec_in_flight(bs);
2304 return ret;
2307 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2308 int64_t bytes, BdrvRequestFlags flags)
2310 IO_CODE();
2311 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2313 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2314 flags &= ~BDRV_REQ_MAY_UNMAP;
2317 return bdrv_co_pwritev(child, offset, bytes, NULL,
2318 BDRV_REQ_ZERO_WRITE | flags);
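/*
 * Usage sketch (illustrative; 'child' is hypothetical): zero the first
 * 64 KiB of a node, allowing the driver to unmap it where possible. Note
 * that, per the code above, BDRV_REQ_MAY_UNMAP is silently dropped unless
 * the node was opened with BDRV_O_UNMAP:
 *
 *     int ret = bdrv_co_pwrite_zeroes(child, 0, 64 * 1024,
 *                                     BDRV_REQ_MAY_UNMAP);
 */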
2322  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend or not.
2324 int bdrv_flush_all(void)
2326 BdrvNextIterator it;
2327 BlockDriverState *bs = NULL;
2328 int result = 0;
2330 GLOBAL_STATE_CODE();
2333      * The bdrv queue is managed by record/replay;
2334      * creating a new flush request for stopping
2335      * the VM may break determinism.
2337 if (replay_events_enabled()) {
2338 return result;
2341 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2342 AioContext *aio_context = bdrv_get_aio_context(bs);
2343 int ret;
2345 aio_context_acquire(aio_context);
2346 ret = bdrv_flush(bs);
2347 if (ret < 0 && !result) {
2348 result = ret;
2350 aio_context_release(aio_context);
2353 return result;
2357 * Returns the allocation status of the specified sectors.
2358 * Drivers not implementing the functionality are assumed to not support
2359 * backing files, hence all their sectors are reported as allocated.
2361 * If 'want_zero' is true, the caller is querying for mapping
2362 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2363 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2364 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2366 * If 'offset' is beyond the end of the disk image the return value is
2367 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2369 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2370 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2371 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2373 * 'pnum' is set to the number of bytes (including and immediately
2374 * following the specified offset) that are easily known to be in the
2375 * same allocated/unallocated state. Note that a second call starting
2376 * at the original offset plus returned pnum may have the same status.
2377 * The returned value is non-zero on success except at end-of-file.
2379 * Returns negative errno on failure. Otherwise, if the
2380 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2381 * set to the host mapping and BDS corresponding to the guest offset.
2383 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2384 bool want_zero,
2385 int64_t offset, int64_t bytes,
2386 int64_t *pnum, int64_t *map,
2387 BlockDriverState **file)
2389 int64_t total_size;
2390 int64_t n; /* bytes */
2391 int ret;
2392 int64_t local_map = 0;
2393 BlockDriverState *local_file = NULL;
2394 int64_t aligned_offset, aligned_bytes;
2395 uint32_t align;
2396 bool has_filtered_child;
2398 assert(pnum);
2399 *pnum = 0;
2400 total_size = bdrv_getlength(bs);
2401 if (total_size < 0) {
2402 ret = total_size;
2403 goto early_out;
2406 if (offset >= total_size) {
2407 ret = BDRV_BLOCK_EOF;
2408 goto early_out;
2410 if (!bytes) {
2411 ret = 0;
2412 goto early_out;
2415 n = total_size - offset;
2416 if (n < bytes) {
2417 bytes = n;
2420 /* Must be non-NULL or bdrv_getlength() would have failed */
2421 assert(bs->drv);
2422 has_filtered_child = bdrv_filter_child(bs);
2423 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2424 *pnum = bytes;
2425 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2426 if (offset + bytes == total_size) {
2427 ret |= BDRV_BLOCK_EOF;
2429 if (bs->drv->protocol_name) {
2430 ret |= BDRV_BLOCK_OFFSET_VALID;
2431 local_map = offset;
2432 local_file = bs;
2434 goto early_out;
2437 bdrv_inc_in_flight(bs);
2439 /* Round out to request_alignment boundaries */
2440 align = bs->bl.request_alignment;
2441 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2442 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2444 if (bs->drv->bdrv_co_block_status) {
2446 * Use the block-status cache only for protocol nodes: Format
2447 * drivers are generally quick to inquire the status, but protocol
2448 * drivers often need to get information from outside of qemu, so
2449 * we do not have control over the actual implementation. There
2450 * have been cases where inquiring the status took an unreasonably
2451 * long time, and we can do nothing in qemu to fix it.
2452 * This is especially problematic for images with large data areas,
2453 * because finding the few holes in them and giving them special
2454 * treatment does not gain much performance. Therefore, we try to
2455 * cache the last-identified data region.
2457          * Additionally, limiting ourselves to protocol nodes allows us to assume
2458 * the block status for data regions to be DATA | OFFSET_VALID, and
2459 * that the host offset is the same as the guest offset.
2461 * Note that it is possible that external writers zero parts of
2462 * the cached regions without the cache being invalidated, and so
2463 * we may report zeroes as data. This is not catastrophic,
2464 * however, because reporting zeroes as data is fine.
2466 if (QLIST_EMPTY(&bs->children) &&
2467 bdrv_bsc_is_data(bs, aligned_offset, pnum))
2469 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
2470 local_file = bs;
2471 local_map = aligned_offset;
2472 } else {
2473 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2474 aligned_bytes, pnum, &local_map,
2475 &local_file);
2478 * Note that checking QLIST_EMPTY(&bs->children) is also done when
2479 * the cache is queried above. Technically, we do not need to check
2480 * it here; the worst that can happen is that we fill the cache for
2481 * non-protocol nodes, and then it is never used. However, filling
2482 * the cache requires an RCU update, so double check here to avoid
2483 * such an update if possible.
2485 * Check want_zero, because we only want to update the cache when we
2486 * have accurate information about what is zero and what is data.
2488 if (want_zero &&
2489 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
2490 QLIST_EMPTY(&bs->children))
2493 * When a protocol driver reports BLOCK_OFFSET_VALID, the
2494 * returned local_map value must be the same as the offset we
2495                  * have passed (aligned_offset), and local_file must be the node
2496 * itself.
2497 * Assert this, because we follow this rule when reading from
2498 * the cache (see the `local_file = bs` and
2499 * `local_map = aligned_offset` assignments above), and the
2500 * result the cache delivers must be the same as the driver
2501 * would deliver.
2503 assert(local_file == bs);
2504 assert(local_map == aligned_offset);
2505 bdrv_bsc_fill(bs, aligned_offset, *pnum);
2508 } else {
2509 /* Default code for filters */
2511 local_file = bdrv_filter_bs(bs);
2512 assert(local_file);
2514 *pnum = aligned_bytes;
2515 local_map = aligned_offset;
2516 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2518 if (ret < 0) {
2519 *pnum = 0;
2520 goto out;
2524 * The driver's result must be a non-zero multiple of request_alignment.
2525 * Clamp pnum and adjust map to original request.
2527 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2528 align > offset - aligned_offset);
2529 if (ret & BDRV_BLOCK_RECURSE) {
2530 assert(ret & BDRV_BLOCK_DATA);
2531 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2532 assert(!(ret & BDRV_BLOCK_ZERO));
2535 *pnum -= offset - aligned_offset;
2536 if (*pnum > bytes) {
2537 *pnum = bytes;
2539 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2540 local_map += offset - aligned_offset;
2543 if (ret & BDRV_BLOCK_RAW) {
2544 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2545 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2546 *pnum, pnum, &local_map, &local_file);
2547 goto out;
2550 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2551 ret |= BDRV_BLOCK_ALLOCATED;
2552 } else if (bs->drv->supports_backing) {
2553 BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2555 if (!cow_bs) {
2556 ret |= BDRV_BLOCK_ZERO;
2557 } else if (want_zero) {
2558 int64_t size2 = bdrv_getlength(cow_bs);
2560 if (size2 >= 0 && offset >= size2) {
2561 ret |= BDRV_BLOCK_ZERO;
2566 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2567 local_file && local_file != bs &&
2568 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2569 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2570 int64_t file_pnum;
2571 int ret2;
2573 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2574 *pnum, &file_pnum, NULL, NULL);
2575 if (ret2 >= 0) {
2576 /* Ignore errors. This is just providing extra information, it
2577 * is useful but not necessary.
2579 if (ret2 & BDRV_BLOCK_EOF &&
2580 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2582 * It is valid for the format block driver to read
2583 * beyond the end of the underlying file's current
2584 * size; such areas read as zero.
2586 ret |= BDRV_BLOCK_ZERO;
2587 } else {
2588 /* Limit request to the range reported by the protocol driver */
2589 *pnum = file_pnum;
2590 ret |= (ret2 & BDRV_BLOCK_ZERO);
2595 out:
2596 bdrv_dec_in_flight(bs);
2597 if (ret >= 0 && offset + *pnum == total_size) {
2598 ret |= BDRV_BLOCK_EOF;
2600 early_out:
2601 if (file) {
2602 *file = local_file;
2604 if (map) {
2605 *map = local_map;
2607 return ret;
2610 int coroutine_fn
2611 bdrv_co_common_block_status_above(BlockDriverState *bs,
2612 BlockDriverState *base,
2613 bool include_base,
2614 bool want_zero,
2615 int64_t offset,
2616 int64_t bytes,
2617 int64_t *pnum,
2618 int64_t *map,
2619 BlockDriverState **file,
2620 int *depth)
2622 int ret;
2623 BlockDriverState *p;
2624 int64_t eof = 0;
2625 int dummy;
2626 IO_CODE();
2628 assert(!include_base || base); /* Can't include NULL base */
2630 if (!depth) {
2631 depth = &dummy;
2633 *depth = 0;
2635 if (!include_base && bs == base) {
2636 *pnum = bytes;
2637 return 0;
2640 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
2641 ++*depth;
2642 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2643 return ret;
2646 if (ret & BDRV_BLOCK_EOF) {
2647 eof = offset + *pnum;
2650 assert(*pnum <= bytes);
2651 bytes = *pnum;
2653 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2654 p = bdrv_filter_or_cow_bs(p))
2656 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2657 file);
2658 ++*depth;
2659 if (ret < 0) {
2660 return ret;
2662 if (*pnum == 0) {
2664 * The top layer deferred to this layer, and because this layer is
2665 * short, any zeroes that we synthesize beyond EOF behave as if they
2666 * were allocated at this layer.
2668 * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
2669 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2670 * below.
2672 assert(ret & BDRV_BLOCK_EOF);
2673 *pnum = bytes;
2674 if (file) {
2675 *file = p;
2677 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2678 break;
2680 if (ret & BDRV_BLOCK_ALLOCATED) {
2682              * We've found the node and the status, so we must break out of the loop.
2684 * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
2685 * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2686 * below.
2688 ret &= ~BDRV_BLOCK_EOF;
2689 break;
2692 if (p == base) {
2693 assert(include_base);
2694 break;
2698          * OK, the [offset, offset + *pnum) region is unallocated on this layer,
2699          * so let's continue diving down the chain.
2701 assert(*pnum <= bytes);
2702 bytes = *pnum;
2705 if (offset + *pnum == eof) {
2706 ret |= BDRV_BLOCK_EOF;
2709 return ret;
2712 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2713 int64_t offset, int64_t bytes, int64_t *pnum,
2714 int64_t *map, BlockDriverState **file)
2716 IO_CODE();
2717 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
2718 pnum, map, file, NULL);
2721 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2722 int64_t *pnum, int64_t *map, BlockDriverState **file)
2724 IO_CODE();
2725 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
2726 offset, bytes, pnum, map, file);
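/*
 * Illustrative caller loop (not part of this file; 'bs' and 'total' are
 * hypothetical), following the contract documented above: advance by the
 * returned *pnum, which is only 0 at end-of-file:
 *
 *     int64_t offset = 0, pnum, map;
 *     BlockDriverState *file;
 *     while (offset < total) {
 *         int ret = bdrv_block_status(bs, offset, total - offset,
 *                                     &pnum, &map, &file);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         if (ret & BDRV_BLOCK_ZERO) {
 *             ... this range reads as zeroes ...
 *         }
 *         offset += pnum;
 *     }
 */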
2730 * Check @bs (and its backing chain) to see if the range defined
2731 * by @offset and @bytes is known to read as zeroes.
2732 * Return 1 if that is the case, 0 otherwise and -errno on error.
2733 * This test is meant to be fast rather than accurate so returning 0
2734 * does not guarantee non-zero data.
2736 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2737 int64_t bytes)
2739 int ret;
2740 int64_t pnum = bytes;
2741 IO_CODE();
2743 if (!bytes) {
2744 return 1;
2747 ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
2748 bytes, &pnum, NULL, NULL, NULL);
2750 if (ret < 0) {
2751 return ret;
2754 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
2757 int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
2758 int64_t *pnum)
2760 int ret;
2761 int64_t dummy;
2762 IO_CODE();
2764 ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
2765 bytes, pnum ? pnum : &dummy, NULL,
2766 NULL, NULL);
2767 if (ret < 0) {
2768 return ret;
2770 return !!(ret & BDRV_BLOCK_ALLOCATED);
2774 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2776 * Return a positive depth if (a prefix of) the given range is allocated
2777 * in any image between BASE and TOP (BASE is only included if include_base
2778 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
2779 * BASE can be NULL to check if the given offset is allocated in any
2780 * image of the chain. Return 0 otherwise, or negative errno on
2781 * failure.
2783 * 'pnum' is set to the number of bytes (including and immediately
2784 * following the specified offset) that are known to be in the same
2785 * allocated/unallocated state. Note that a subsequent call starting
2786 * at 'offset + *pnum' may return the same allocation status (in other
2787 * words, the result is not necessarily the maximum possible range);
2788 * but 'pnum' will only be 0 when end of file is reached.
2790 int bdrv_is_allocated_above(BlockDriverState *top,
2791 BlockDriverState *base,
2792 bool include_base, int64_t offset,
2793 int64_t bytes, int64_t *pnum)
2795 int depth;
2796 int ret = bdrv_common_block_status_above(top, base, include_base, false,
2797 offset, bytes, pnum, NULL, NULL,
2798 &depth);
2799 IO_CODE();
2800 if (ret < 0) {
2801 return ret;
2804 if (ret & BDRV_BLOCK_ALLOCATED) {
2805 return depth;
2807 return 0;
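/*
 * Example of how the returned depth maps onto a chain (illustrative;
 * 'base', 'mid' and 'top' are hypothetical nodes forming base <- mid <- top).
 * With include_base == false,
 *
 *     int depth = bdrv_is_allocated_above(top, base, false,
 *                                         offset, bytes, &pnum);
 *
 * yields 1 if (a prefix of) the range is allocated in top, 2 if it is only
 * allocated in mid, and 0 if it is allocated nowhere above base (data
 * present only in base still returns 0 here).
 */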
2810 int coroutine_fn
2811 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2813 BlockDriver *drv = bs->drv;
2814 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2815 int ret;
2816 IO_CODE();
2818 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2819 if (ret < 0) {
2820 return ret;
2823 if (!drv) {
2824 return -ENOMEDIUM;
2827 bdrv_inc_in_flight(bs);
2829 if (drv->bdrv_load_vmstate) {
2830 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2831 } else if (child_bs) {
2832 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2833 } else {
2834 ret = -ENOTSUP;
2837 bdrv_dec_in_flight(bs);
2839 return ret;
2842 int coroutine_fn
2843 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2845 BlockDriver *drv = bs->drv;
2846 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2847 int ret;
2848 IO_CODE();
2850 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2851 if (ret < 0) {
2852 return ret;
2855 if (!drv) {
2856 return -ENOMEDIUM;
2859 bdrv_inc_in_flight(bs);
2861 if (drv->bdrv_save_vmstate) {
2862 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2863 } else if (child_bs) {
2864 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2865 } else {
2866 ret = -ENOTSUP;
2869 bdrv_dec_in_flight(bs);
2871 return ret;
2874 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2875 int64_t pos, int size)
2877 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2878 int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2879 IO_CODE();
2881 return ret < 0 ? ret : size;
2884 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2885 int64_t pos, int size)
2887 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2888 int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2889 IO_CODE();
2891 return ret < 0 ? ret : size;
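/*
 * Usage sketch (illustrative; 'bs' is hypothetical and must use a format
 * driver that supports vmstate, e.g. qcow2): store a small blob at vmstate
 * offset 0 and read it back. Both helpers return the byte count on success
 * or a negative errno:
 *
 *     uint8_t buf[512] = { 0 };
 *     int ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
 *     if (ret == sizeof(buf)) {
 *         ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
 *     }
 */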
2894 /**************************************************************/
2895 /* async I/Os */
2897 void bdrv_aio_cancel(BlockAIOCB *acb)
2899 IO_CODE();
2900 qemu_aio_ref(acb);
2901 bdrv_aio_cancel_async(acb);
2902 while (acb->refcnt > 1) {
2903 if (acb->aiocb_info->get_aio_context) {
2904 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2905 } else if (acb->bs) {
2906 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2907 * assert that we're not using an I/O thread. Thread-safe
2908 * code should use bdrv_aio_cancel_async exclusively.
2910 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2911 aio_poll(bdrv_get_aio_context(acb->bs), true);
2912 } else {
2913 abort();
2916 qemu_aio_unref(acb);
2919 /* Async version of aio cancel. The caller is not blocked if the acb implements
2920  * cancel_async; otherwise we do nothing and let the request complete normally.
2921 * In either case the completion callback must be called. */
2922 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2924 IO_CODE();
2925 if (acb->aiocb_info->cancel_async) {
2926 acb->aiocb_info->cancel_async(acb);
2930 /**************************************************************/
2931 /* Coroutine block device emulation */
2933 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2935 BdrvChild *primary_child = bdrv_primary_child(bs);
2936 BdrvChild *child;
2937 int current_gen;
2938 int ret = 0;
2939 IO_CODE();
2941 bdrv_inc_in_flight(bs);
2943 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2944 bdrv_is_sg(bs)) {
2945 goto early_exit;
2948 qemu_co_mutex_lock(&bs->reqs_lock);
2949 current_gen = qatomic_read(&bs->write_gen);
2951 /* Wait until any previous flushes are completed */
2952 while (bs->active_flush_req) {
2953 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2956 /* Flushes reach this point in nondecreasing current_gen order. */
2957 bs->active_flush_req = true;
2958 qemu_co_mutex_unlock(&bs->reqs_lock);
2960 /* Write back all layers by calling one driver function */
2961 if (bs->drv->bdrv_co_flush) {
2962 ret = bs->drv->bdrv_co_flush(bs);
2963 goto out;
2966 /* Write back cached data to the OS even with cache=unsafe */
2967 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2968 if (bs->drv->bdrv_co_flush_to_os) {
2969 ret = bs->drv->bdrv_co_flush_to_os(bs);
2970 if (ret < 0) {
2971 goto out;
2975 /* But don't actually force it to the disk with cache=unsafe */
2976 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2977 goto flush_children;
2980 /* Check if we really need to flush anything */
2981 if (bs->flushed_gen == current_gen) {
2982 goto flush_children;
2985 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2986 if (!bs->drv) {
2987 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2988 * (even in case of apparent success) */
2989 ret = -ENOMEDIUM;
2990 goto out;
2992 if (bs->drv->bdrv_co_flush_to_disk) {
2993 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2994 } else if (bs->drv->bdrv_aio_flush) {
2995 BlockAIOCB *acb;
2996 CoroutineIOCompletion co = {
2997 .coroutine = qemu_coroutine_self(),
3000 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3001 if (acb == NULL) {
3002 ret = -EIO;
3003 } else {
3004 qemu_coroutine_yield();
3005 ret = co.ret;
3007 } else {
3009          * Some block drivers always operate in either writethrough or unsafe
3010          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3011          * know how the server works (because the behaviour is hardcoded or
3012          * depends on server-side configuration), so we can't ensure that
3013          * everything is safe on disk. Returning an error doesn't work because
3014          * that would break guests even if the server operates in writethrough
3015          * mode.
3017          * Let's hope the user knows what they're doing.
3019 ret = 0;
3022 if (ret < 0) {
3023 goto out;
3026 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
3027 * in the case of cache=unsafe, so there are no useless flushes.
3029 flush_children:
3030 ret = 0;
3031 QLIST_FOREACH(child, &bs->children, next) {
3032 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
3033 int this_child_ret = bdrv_co_flush(child->bs);
3034 if (!ret) {
3035 ret = this_child_ret;
3040 out:
3041 /* Notify any pending flushes that we have completed */
3042 if (ret == 0) {
3043 bs->flushed_gen = current_gen;
3046 qemu_co_mutex_lock(&bs->reqs_lock);
3047 bs->active_flush_req = false;
3048 /* Return value is ignored - it's ok if wait queue is empty */
3049 qemu_co_queue_next(&bs->flush_queue);
3050 qemu_co_mutex_unlock(&bs->reqs_lock);
3052 early_exit:
3053 bdrv_dec_in_flight(bs);
3054 return ret;
3057 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
3058 int64_t bytes)
3060 BdrvTrackedRequest req;
3061 int ret;
3062 int64_t max_pdiscard;
3063 int head, tail, align;
3064 BlockDriverState *bs = child->bs;
3065 IO_CODE();
3067 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
3068 return -ENOMEDIUM;
3071 if (bdrv_has_readonly_bitmaps(bs)) {
3072 return -EPERM;
3075 ret = bdrv_check_request(offset, bytes, NULL);
3076 if (ret < 0) {
3077 return ret;
3080 /* Do nothing if disabled. */
3081 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3082 return 0;
3085 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
3086 return 0;
3089 /* Invalidate the cached block-status data range if this discard overlaps */
3090 bdrv_bsc_invalidate_range(bs, offset, bytes);
3092 /* Discard is advisory, but some devices track and coalesce
3093 * unaligned requests, so we must pass everything down rather than
3094 * round here. Still, most devices will just silently ignore
3095 * unaligned requests (by returning -ENOTSUP), so we must fragment
3096 * the request accordingly. */
3097 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3098 assert(align % bs->bl.request_alignment == 0);
3099 head = offset % align;
3100 tail = (offset + bytes) % align;
3102 bdrv_inc_in_flight(bs);
3103 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
3105 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3106 if (ret < 0) {
3107 goto out;
3110 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
3111 align);
3112 assert(max_pdiscard >= bs->bl.request_alignment);
3114 while (bytes > 0) {
3115 int64_t num = bytes;
3117 if (head) {
3118 /* Make small requests to get to alignment boundaries. */
3119 num = MIN(bytes, align - head);
3120 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
3121 num %= bs->bl.request_alignment;
3123 head = (head + num) % align;
3124 assert(num < max_pdiscard);
3125 } else if (tail) {
3126 if (num > align) {
3127 /* Shorten the request to the last aligned cluster. */
3128 num -= tail;
3129 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
3130 tail > bs->bl.request_alignment) {
3131 tail %= bs->bl.request_alignment;
3132 num -= tail;
3135 /* limit request size */
3136 if (num > max_pdiscard) {
3137 num = max_pdiscard;
3140 if (!bs->drv) {
3141 ret = -ENOMEDIUM;
3142 goto out;
3144 if (bs->drv->bdrv_co_pdiscard) {
3145 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
3146 } else {
3147 BlockAIOCB *acb;
3148 CoroutineIOCompletion co = {
3149 .coroutine = qemu_coroutine_self(),
3152 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
3153 bdrv_co_io_em_complete, &co);
3154 if (acb == NULL) {
3155 ret = -EIO;
3156 goto out;
3157 } else {
3158 qemu_coroutine_yield();
3159 ret = co.ret;
3162 if (ret && ret != -ENOTSUP) {
3163 goto out;
3166 offset += num;
3167 bytes -= num;
3169 ret = 0;
3170 out:
3171 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3172 tracked_request_end(&req);
3173 bdrv_dec_in_flight(bs);
3174 return ret;
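/*
 * Worked example of the head/tail handling above (numbers are illustrative,
 * assuming max_pdiscard is large enough): with request_alignment = 512 and
 * pdiscard_alignment = 64 KiB (so align = 64 KiB), a discard of 200 KiB at
 * offset 10 KiB is fragmented into 54 KiB (to reach the first 64 KiB
 * boundary), 128 KiB (the aligned middle), and an 18 KiB tail that is
 * passed down unaligned, since discard is advisory and drivers may simply
 * ignore it.
 */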
3177 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3179 BlockDriver *drv = bs->drv;
3180 CoroutineIOCompletion co = {
3181 .coroutine = qemu_coroutine_self(),
3183 BlockAIOCB *acb;
3184 IO_CODE();
3186 bdrv_inc_in_flight(bs);
3187 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3188 co.ret = -ENOTSUP;
3189 goto out;
3192 if (drv->bdrv_co_ioctl) {
3193 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3194 } else {
3195 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3196 if (!acb) {
3197 co.ret = -ENOTSUP;
3198 goto out;
3200 qemu_coroutine_yield();
3202 out:
3203 bdrv_dec_in_flight(bs);
3204 return co.ret;
3207 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3209 IO_CODE();
3210 return qemu_memalign(bdrv_opt_mem_align(bs), size);
3213 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3215 IO_CODE();
3216 return memset(qemu_blockalign(bs, size), 0, size);
3219 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3221 size_t align = bdrv_opt_mem_align(bs);
3222 IO_CODE();
3224 /* Ensure that NULL is never returned on success */
3225 assert(align > 0);
3226 if (size == 0) {
3227 size = align;
3230 return qemu_try_memalign(align, size);
3233 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3235 void *mem = qemu_try_blockalign(bs, size);
3236 IO_CODE();
3238 if (mem) {
3239 memset(mem, 0, size);
3242 return mem;
3245 void bdrv_io_plug(BlockDriverState *bs)
3247 BdrvChild *child;
3248 IO_CODE();
3250 QLIST_FOREACH(child, &bs->children, next) {
3251 bdrv_io_plug(child->bs);
3254 if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
3255 BlockDriver *drv = bs->drv;
3256 if (drv && drv->bdrv_io_plug) {
3257 drv->bdrv_io_plug(bs);
3262 void bdrv_io_unplug(BlockDriverState *bs)
3264 BdrvChild *child;
3265 IO_CODE();
3267 assert(bs->io_plugged);
3268 if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
3269 BlockDriver *drv = bs->drv;
3270 if (drv && drv->bdrv_io_unplug) {
3271 drv->bdrv_io_unplug(bs);
3275 QLIST_FOREACH(child, &bs->children, next) {
3276 bdrv_io_unplug(child->bs);
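/*
 * Typical batching pattern (illustrative; 'bs' is hypothetical). Plug/unplug
 * calls nest via the io_plugged counter, and the driver callback only fires
 * on the outermost pair:
 *
 *     bdrv_io_plug(bs);
 *     ... queue several I/O requests on bs ...
 *     bdrv_io_unplug(bs);   // driver may submit the whole batch here
 */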
3280 /* Helper that undoes bdrv_register_buf() when it fails partway through */
3281 static void bdrv_register_buf_rollback(BlockDriverState *bs,
3282 void *host,
3283 size_t size,
3284 BdrvChild *final_child)
3286 BdrvChild *child;
3288 QLIST_FOREACH(child, &bs->children, next) {
3289 if (child == final_child) {
3290 break;
3293 bdrv_unregister_buf(child->bs, host, size);
3296 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3297 bs->drv->bdrv_unregister_buf(bs, host, size);
3301 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
3302 Error **errp)
3304 BdrvChild *child;
3306 GLOBAL_STATE_CODE();
3307 if (bs->drv && bs->drv->bdrv_register_buf) {
3308 if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
3309 return false;
3312 QLIST_FOREACH(child, &bs->children, next) {
3313 if (!bdrv_register_buf(child->bs, host, size, errp)) {
3314 bdrv_register_buf_rollback(bs, host, size, child);
3315 return false;
3318 return true;
3321 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
3323 BdrvChild *child;
3325 GLOBAL_STATE_CODE();
3326 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3327 bs->drv->bdrv_unregister_buf(bs, host, size);
3329 QLIST_FOREACH(child, &bs->children, next) {
3330 bdrv_unregister_buf(child->bs, host, size);
3334 static int coroutine_fn bdrv_co_copy_range_internal(
3335 BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3336 int64_t dst_offset, int64_t bytes,
3337 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3338 bool recurse_src)
3340 BdrvTrackedRequest req;
3341 int ret;
3343 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3344 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3345 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3346 assert(!(read_flags & BDRV_REQ_NO_WAIT));
3347 assert(!(write_flags & BDRV_REQ_NO_WAIT));
3349 if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
3350 return -ENOMEDIUM;
3352 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3353 if (ret) {
3354 return ret;
3356 if (write_flags & BDRV_REQ_ZERO_WRITE) {
3357 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3360 if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
3361 return -ENOMEDIUM;
3363 ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3364 if (ret) {
3365 return ret;
3368 if (!src->bs->drv->bdrv_co_copy_range_from
3369 || !dst->bs->drv->bdrv_co_copy_range_to
3370 || src->bs->encrypted || dst->bs->encrypted) {
3371 return -ENOTSUP;
3374 if (recurse_src) {
3375 bdrv_inc_in_flight(src->bs);
3376 tracked_request_begin(&req, src->bs, src_offset, bytes,
3377 BDRV_TRACKED_READ);
3379 /* BDRV_REQ_SERIALISING is only for write operation */
3380 assert(!(read_flags & BDRV_REQ_SERIALISING));
3381 bdrv_wait_serialising_requests(&req);
3383 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3384 src, src_offset,
3385 dst, dst_offset,
3386 bytes,
3387 read_flags, write_flags);
3389 tracked_request_end(&req);
3390 bdrv_dec_in_flight(src->bs);
3391 } else {
3392 bdrv_inc_in_flight(dst->bs);
3393 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3394 BDRV_TRACKED_WRITE);
3395 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3396 write_flags);
3397 if (!ret) {
3398 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3399 src, src_offset,
3400 dst, dst_offset,
3401 bytes,
3402 read_flags, write_flags);
3404 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3405 tracked_request_end(&req);
3406 bdrv_dec_in_flight(dst->bs);
3409 return ret;
3412 /* Copy range from @src to @dst.
3414 * See the comment of bdrv_co_copy_range for the parameter and return value
3415 * semantics. */
3416 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3417 BdrvChild *dst, int64_t dst_offset,
3418 int64_t bytes,
3419 BdrvRequestFlags read_flags,
3420 BdrvRequestFlags write_flags)
3422 IO_CODE();
3423 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3424 read_flags, write_flags);
3425 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3426 bytes, read_flags, write_flags, true);
3429 /* Copy range from @src to @dst.
3431 * See the comment of bdrv_co_copy_range for the parameter and return value
3432 * semantics. */
3433 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3434 BdrvChild *dst, int64_t dst_offset,
3435 int64_t bytes,
3436 BdrvRequestFlags read_flags,
3437 BdrvRequestFlags write_flags)
3439 IO_CODE();
3440 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3441 read_flags, write_flags);
3442 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3443 bytes, read_flags, write_flags, false);
3446 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3447 BdrvChild *dst, int64_t dst_offset,
3448 int64_t bytes, BdrvRequestFlags read_flags,
3449 BdrvRequestFlags write_flags)
3451 IO_CODE();
3452 return bdrv_co_copy_range_from(src, src_offset,
3453 dst, dst_offset,
3454 bytes, read_flags, write_flags);
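/*
 * Usage sketch (illustrative; 'src_child' and 'dst_child' are hypothetical,
 * and the call must run in coroutine context): offload a 1 MiB copy between
 * two nodes. If either driver lacks bdrv_co_copy_range_from/to support, or
 * a node is encrypted, the call returns -ENOTSUP and the caller is expected
 * to fall back to a regular read/write pair:
 *
 *     int ret = bdrv_co_copy_range(src_child, 0, dst_child, 0, 1 << 20,
 *                                  0, 0);
 */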
3457 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3459 BdrvChild *c;
3460 QLIST_FOREACH(c, &bs->parents, next_parent) {
3461 if (c->klass->resize) {
3462 c->klass->resize(c);
3468 * Truncate file to 'offset' bytes (needed only for file protocols)
3470 * If 'exact' is true, the file must be resized to exactly the given
3471 * 'offset'. Otherwise, it is sufficient for the node to be at least
3472 * 'offset' bytes in length.
3474 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3475 PreallocMode prealloc, BdrvRequestFlags flags,
3476 Error **errp)
3478 BlockDriverState *bs = child->bs;
3479 BdrvChild *filtered, *backing;
3480 BlockDriver *drv = bs->drv;
3481 BdrvTrackedRequest req;
3482 int64_t old_size, new_bytes;
3483 int ret;
3484 IO_CODE();
3486 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3487 if (!drv) {
3488 error_setg(errp, "No medium inserted");
3489 return -ENOMEDIUM;
3491 if (offset < 0) {
3492 error_setg(errp, "Image size cannot be negative");
3493 return -EINVAL;
3496 ret = bdrv_check_request(offset, 0, errp);
3497 if (ret < 0) {
3498 return ret;
3501 old_size = bdrv_getlength(bs);
3502 if (old_size < 0) {
3503 error_setg_errno(errp, -old_size, "Failed to get old image size");
3504 return old_size;
3507 if (bdrv_is_read_only(bs)) {
3508 error_setg(errp, "Image is read-only");
3509 return -EACCES;
3512 if (offset > old_size) {
3513 new_bytes = offset - old_size;
3514 } else {
3515 new_bytes = 0;
3518 bdrv_inc_in_flight(bs);
3519 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3520 BDRV_TRACKED_TRUNCATE);
3522 /* If we are growing the image and potentially using preallocation for the
3523 * new area, we need to make sure that no write requests are made to it
3524 * concurrently or they might be overwritten by preallocation. */
3525 if (new_bytes) {
3526 bdrv_make_request_serialising(&req, 1);
3528         ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, 0);
3530 if (ret < 0) {
3531 error_setg_errno(errp, -ret,
3532 "Failed to prepare request for truncation");
3533 goto out;
3536 filtered = bdrv_filter_child(bs);
3537 backing = bdrv_cow_child(bs);
3540 * If the image has a backing file that is large enough that it would
3541 * provide data for the new area, we cannot leave it unallocated because
3542 * then the backing file content would become visible. Instead, zero-fill
3543 * the new area.
3545 * Note that if the image has a backing file, but was opened without the
3546 * backing file, taking care of keeping things consistent with that backing
3547 * file is the user's responsibility.
3549 if (new_bytes && backing) {
3550 int64_t backing_len;
3552 backing_len = bdrv_getlength(backing->bs);
3553 if (backing_len < 0) {
3554 ret = backing_len;
3555 error_setg_errno(errp, -ret, "Could not get backing file size");
3556 goto out;
3559 if (backing_len > old_size) {
3560 flags |= BDRV_REQ_ZERO_WRITE;
3564 if (drv->bdrv_co_truncate) {
3565 if (flags & ~bs->supported_truncate_flags) {
3566 error_setg(errp, "Block driver does not support requested flags");
3567 ret = -ENOTSUP;
3568 goto out;
3570 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
3571 } else if (filtered) {
3572 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
3573 } else {
3574 error_setg(errp, "Image format driver does not support resize");
3575 ret = -ENOTSUP;
3576 goto out;
3578 if (ret < 0) {
3579 goto out;
3582 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3583 if (ret < 0) {
3584 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3585 } else {
3586 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3588 /* It's possible that truncation succeeded but refresh_total_sectors
3589 * failed, but the latter doesn't affect how we should finish the request.
3590 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3591 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3593 out:
3594 tracked_request_end(&req);
3595 bdrv_dec_in_flight(bs);
3597 return ret;
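/*
 * Usage sketch (illustrative; 'child' and 'new_size' are hypothetical, and
 * the call must run in coroutine context): grow a node without
 * preallocation. With exact == false the node only has to end up at least
 * 'new_size' bytes long:
 *
 *     Error *local_err = NULL;
 *     int ret = bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF,
 *                                0, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */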
3600 void bdrv_cancel_in_flight(BlockDriverState *bs)
3602 GLOBAL_STATE_CODE();
3603 if (!bs || !bs->drv) {
3604 return;
3607 if (bs->drv->bdrv_cancel_in_flight) {
3608 bs->drv->bdrv_cancel_in_flight(bs);
3612 int coroutine_fn
3613 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
3614 QEMUIOVector *qiov, size_t qiov_offset)
3616 BlockDriverState *bs = child->bs;
3617 BlockDriver *drv = bs->drv;
3618 int ret;
3619 IO_CODE();
3621 if (!drv) {
3622 return -ENOMEDIUM;
3625 if (!drv->bdrv_co_preadv_snapshot) {
3626 return -ENOTSUP;
3629 bdrv_inc_in_flight(bs);
3630 ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
3631 bdrv_dec_in_flight(bs);
3633 return ret;
3636 int coroutine_fn
3637 bdrv_co_snapshot_block_status(BlockDriverState *bs,
3638 bool want_zero, int64_t offset, int64_t bytes,
3639 int64_t *pnum, int64_t *map,
3640 BlockDriverState **file)
3642 BlockDriver *drv = bs->drv;
3643 int ret;
3644 IO_CODE();
3646 if (!drv) {
3647 return -ENOMEDIUM;
3650 if (!drv->bdrv_co_snapshot_block_status) {
3651 return -ENOTSUP;
3654 bdrv_inc_in_flight(bs);
3655 ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
3656 pnum, map, file);
3657 bdrv_dec_in_flight(bs);
3659 return ret;
3662 int coroutine_fn
3663 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
3665 BlockDriver *drv = bs->drv;
3666 int ret;
3667 IO_CODE();
3669 if (!drv) {
3670 return -ENOMEDIUM;
3673 if (!drv->bdrv_co_pdiscard_snapshot) {
3674 return -ENOTSUP;
3677 bdrv_inc_in_flight(bs);
3678 ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
3679 bdrv_dec_in_flight(bs);
3681 return ret;