block: Mark bdrv_co_io_(un)plug() and callers GRAPH_RDLOCK
[qemu/ar7.git] / block / io.c
blob b5459c2f41fb544b1e8178593f4b589efbfae407
/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}
void bdrv_parent_drained_end_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}
bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}
static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}
void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};
/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();
    assume_graph_lock(); /* FIXME */

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}
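
/*
 * Example (illustrative sketch, not part of this file): a user that wants
 * copy-on-read semantics brackets its work with the refcounted toggle:
 *
 *     bdrv_enable_copy_on_read(bs);    // count 0 -> 1, CoR becomes active
 *     ... reads on bs now get BDRV_REQ_COPY_ON_READ applied, see
 *     ... bdrv_co_preadv_part() below
 *     bdrv_disable_copy_on_read(bs);   // count 1 -> 0, CoR off again
 *
 * A second concurrent user simply increments the count further; the feature
 * only turns off once every user has called bdrv_disable_copy_on_read().
 */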
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    IO_OR_GS_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}
static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/*
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
        aio_enable_external(bdrv_get_aio_context(bs));
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
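
/*
 * Example (illustrative sketch): callers that must not race with in-flight
 * I/O, e.g. while manipulating state the I/O path observes, typically use a
 * drained section rather than the one-shot bdrv_drain():
 *
 *     bdrv_drained_begin(bs);    // quiesce parents and driver, poll requests
 *     ... modify the graph or other state that I/O must not see ...
 *     bdrv_drained_end(bs);      // resume in child-to-parent order
 *
 * Sections may nest; bs->quiesce_counter tracks the nesting depth.
 */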
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;
static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;
    GLOBAL_STATE_CODE();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the I/O requests to finish
     * could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the I/O requests to finish
     * could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the I/O requests to finish
     * could take forever
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}
void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
/*
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
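
/*
 * Worked example (illustrative): a request with overlap_offset = 4096 and
 * overlap_bytes = 4096 covers [4096, 8192). A probe at offset = 8192 fails
 * the first check (8192 >= 8192) and does not overlap; a probe at
 * offset = 8191, bytes = 2 passes both checks and is reported as an overlap.
 * The interval is half-open, so merely touching ranges do not conflict.
 */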
/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}
/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/*
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}
/**
 * Round a region to cluster boundaries
 */
void coroutine_fn bdrv_round_to_clusters(BlockDriverState *bs,
                                         int64_t offset, int64_t bytes,
                                         int64_t *cluster_offset,
                                         int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
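
/*
 * Worked example (illustrative): with bdi.cluster_size = 65536, a request
 * at offset = 70000, bytes = 1000 yields
 * cluster_offset = QEMU_ALIGN_DOWN(70000, 65536) = 65536 and
 * cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536,
 * i.e. the single cluster [65536, 131072) fully covering the request.
 */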
static coroutine_fn int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}
int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}
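
/*
 * Example (illustrative): bdrv_check_request32() is the variant used by the
 * regular read/write paths below. A request that passes the generic checks
 * but has bytes > BDRV_REQUEST_MAX_BYTES (roughly INT_MAX, aligned down to
 * sector granularity) is still rejected with -EIO, so callers such as
 * bdrv_co_preadv_part() never hand oversized lengths to drivers.
 */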
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
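
/*
 * Example (illustrative sketch): a caller that owns a BdrvChild and wants a
 * fully-zeroed device, preferring unmapping where possible, might do:
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         ... handle error as for bdrv_pwrite() ...
 *     }
 *
 * Already-zero ranges are skipped thanks to the block-status query, so on a
 * fresh sparse image this loop often issues no writes at all.
 */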
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
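
/*
 * Usage pattern (as in bdrv_driver_preadv()/bdrv_driver_pwritev() below):
 * this pair bridges callback-style AIO drivers into coroutine context. The
 * issuing coroutine fills in itself, submits the AIO request with
 * bdrv_co_io_em_complete as the completion callback, and yields:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *
 *     acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
 *                                bdrv_co_io_em_complete, &co);
 *     if (acb) {
 *         qemu_coroutine_yield();   // woken by bdrv_co_io_em_complete()
 *         ret = co.ret;
 *     }
 */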
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
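
/*
 * Worked example (illustrative): with align = 512, offset = 1000 and
 * bytes = 3000, head = 1000 % 512 = 488 and tail = 512 - (4000 % 512) = 96.
 * sum = 488 + 3000 + 96 = 3584 > align with both paddings present, so
 * buf_len = 2 * align = 1024 and merge_reads is false. For a small request
 * such as offset = 100, bytes = 200, sum = 100 + 200 + 212 = 512 == align,
 * so buf_len = align and merge_reads is true: head and tail share a single
 * align-sized buffer.
 */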
static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
                      BdrvRequestPadding *pad, bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}
static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded,
                            BdrvRequestFlags *flags)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }
    if (flags) {
        /* Can't use optimization hint with bounce buffer */
        *flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    return 0;
}
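
/*
 * Example (as used by bdrv_co_preadv_part() below): the caller passes its
 * request parameters by reference and later tears the padding down again:
 *
 *     ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes,
 *                            &pad, NULL, &flags);
 *     ...
 *     ... issue the (possibly widened) aligned request ...
 *     bdrv_padding_destroy(&pad);
 *
 * On success with padding, qiov points at pad.local_qiov, which wraps
 * pad.buf around the caller's original vector.
 */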
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;
    IO_CODE();

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL, &flags);
    if (ret < 0) {
        goto fail;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_padding_destroy(&pad);

fail:
    bdrv_dec_in_flight(bs);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                         BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
                                            INT64_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert_bdrv_graph_readable();
    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    /* By definition there is no user buffer so this flag doesn't make sense */
    if (flags & BDRV_REQ_REGISTERED_BUF) {
        return -EINVAL;
    }

    /* Invalidate the cached block-status data range if this write overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bdrv_is_read_only(bs)) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        bdrv_write_threshold_check_write(bs, offset, bytes);
        return 0;
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
                     int64_t offset, int64_t bytes, int64_t align,
                     QEMUIOVector *qiov, size_t qiov_offset,
                     BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }

        /* Can't use optimization hint with bufferless zero write */
        flags &= ~BDRV_REQ_REGISTERED_BUF;
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
                        BdrvRequestFlags flags, BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    int ret = 0;
    bool padding;
    BdrvRequestPadding pad;

    /* This flag doesn't make sense for padding or zero writes */
    flags &= ~BDRV_REQ_REGISTERED_BUF;

    padding = bdrv_init_padding(bs, offset, bytes, &pad);
    if (padding) {
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;

            qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
            ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
                                       align, &local_qiov, 0,
                                       flags & ~BDRV_REQ_ZERO_WRITE);
            if (ret < 0 || pad.merge_reads) {
                /* Error or all work is done */
                goto out;
            }
            offset += write_bytes - pad.head;
            bytes -= write_bytes - pad.head;
        }
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        int64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, 0, flags);
        if (ret < 0) {
            goto out;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == pad.tail + bytes);

        qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, 0,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
    }

out:
    bdrv_padding_destroy(&pad);

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    IO_CODE();
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}
int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;
    bool padded = false;
    IO_CODE();

    trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);

    if (!bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    } else {
        ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    }
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length write occasionally.
         */
        return 0;
    }

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad request for following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
         * alignment only if there is no ZERO flag.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                               &padded, &flags);
        if (ret < 0) {
            return ret;
        }
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        assert(!padded);
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (padded) {
        /*
         * Request was unaligned to request_alignment and therefore
         * padded. We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        assert(!(flags & BDRV_REQ_NO_WAIT));
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    IO_CODE();
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
    assert_bdrv_graph_readable();

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
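
/*
 * Illustrative sketch (not from the original file): how a coroutine-context
 * caller holding the graph read lock might zero a region through the
 * function above.  The function name and region are hypothetical.
 */
static int coroutine_fn GRAPH_RDLOCK G_GNUC_UNUSED
example_zero_region(BdrvChild *child, int64_t offset, int64_t bytes)
{
    /* Prefer unmapping; the flag is dropped above if the node was opened
     * without BDRV_O_UNMAP. */
    return bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
}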

/*
 * Flush ALL BDSes regardless of whether they are reachable via a
 * BlockBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; creating a new flush
     * request while stopping the VM may break determinism.
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
                     int64_t offset, int64_t bytes,
                     int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    assert_bdrv_graph_readable();
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        /*
         * Use the block-status cache only for protocol nodes: Format
         * drivers are generally quick to inquire the status, but protocol
         * drivers often need to get information from outside of qemu, so
         * we do not have control over the actual implementation.  There
         * have been cases where inquiring the status took an unreasonably
         * long time, and we can do nothing in qemu to fix it.
         * This is especially problematic for images with large data areas,
         * because finding the few holes in them and giving them special
         * treatment does not gain much performance.  Therefore, we try to
         * cache the last-identified data region.
         *
         * Second, limiting ourselves to protocol nodes allows us to assume
         * the block status for data regions to be DATA | OFFSET_VALID, and
         * that the host offset is the same as the guest offset.
         *
         * Note that it is possible that external writers zero parts of
         * the cached regions without the cache being invalidated, and so
         * we may report zeroes as data.  This is not catastrophic,
         * however, because reporting zeroes as data is fine.
         */
        if (QLIST_EMPTY(&bs->children) &&
            bdrv_bsc_is_data(bs, aligned_offset, pnum))
        {
            ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
            local_file = bs;
            local_map = aligned_offset;
        } else {
            ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                                aligned_bytes, pnum, &local_map,
                                                &local_file);

            /*
             * Note that checking QLIST_EMPTY(&bs->children) is also done when
             * the cache is queried above.  Technically, we do not need to
             * check it here; the worst that can happen is that we fill the
             * cache for non-protocol nodes, and then it is never used.
             * However, filling the cache requires an RCU update, so double
             * check here to avoid such an update if possible.
             *
             * Check want_zero, because we only want to update the cache when
             * we have accurate information about what is zero and what is
             * data.
             */
            if (want_zero &&
                ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
                QLIST_EMPTY(&bs->children))
            {
                /*
                 * When a protocol driver reports BLOCK_OFFSET_VALID, the
                 * returned local_map value must be the same as the offset we
                 * have passed (aligned_offset), and local_bs must be the node
                 * itself.
                 * Assert this, because we follow this rule when reading from
                 * the cache (see the `local_file = bs` and
                 * `local_map = aligned_offset` assignments above), and the
                 * result the cache delivers must be the same as the driver
                 * would deliver.
                 */
                assert(local_file == bs);
                assert(local_map == aligned_offset);
                bdrv_bsc_fill(bs, aligned_offset, *pnum);
            }
        }
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information;
             * it is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }

    return ret;
}
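
/*
 * Illustrative sketch (not from the original file): walking an image's
 * extents with the public wrapper bdrv_block_status() defined further
 * below, in the style of qemu-img map.  The function name and the zero
 * counting are hypothetical.
 */
static int G_GNUC_UNUSED
example_count_zero_bytes(BlockDriverState *bs, int64_t len, int64_t *zeroes)
{
    int64_t offset = 0, pnum;

    *zeroes = 0;
    while (offset < len) {
        int ret = bdrv_block_status(bs, offset, len - offset, &pnum,
                                    NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            *zeroes += pnum;
        }
        offset += pnum; /* pnum is only 0 once offset reaches end-of-file */
    }
    return 0;
}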

int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;
    IO_CODE();

    assert(!include_base || base); /* Can't include NULL base */
    assert_bdrv_graph_readable();

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if
             * they were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as the upper layer
             * may be larger.  We'll add BDRV_BLOCK_EOF if needed at function
             * end, see below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status; we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which
             * may be larger.  We'll add BDRV_BLOCK_EOF if needed at function
             * end, see below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, the [offset, offset + *pnum) region is unallocated on this
         * layer, so let's continue diving down the chain.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}

int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                            BlockDriverState *base,
                                            int64_t offset, int64_t bytes,
                                            int64_t *pnum, int64_t *map,
                                            BlockDriverState **file)
{
    IO_CODE();
    return bdrv_co_common_block_status_above(bs, base, false, true, offset,
                                             bytes, pnum, map, file, NULL);
}

int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
                                          pnum, map, file, NULL);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    IO_CODE();
    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
                                   offset, bytes, pnum, map, file);
}

/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate, so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;
    IO_CODE();

    if (!bytes) {
        return 1;
    }

    ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
                                            bytes, &pnum, NULL, NULL, NULL);

    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}
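
/*
 * Illustrative sketch (not from the original file): since the test above is
 * fast rather than accurate, callers typically treat 0 as "do the real
 * work".  Name and return convention are hypothetical.
 */
static int coroutine_fn GRAPH_RDLOCK G_GNUC_UNUSED
example_skip_if_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    int ret = bdrv_co_is_zero_fast(bs, offset, bytes);
    if (ret < 0) {
        return ret; /* real error */
    }
    if (ret == 1) {
        return 0;   /* known to read as zeroes: nothing to do */
    }
    /* ret == 0: may still be zero; a caller would read and check here */
    return 1;
}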

int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
                                            bytes, pnum ? pnum : &dummy, NULL,
                                            NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum)
{
    int ret;
    int64_t dummy;
    IO_CODE();

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/* See bdrv_is_allocated_above for documentation */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            bool include_base, int64_t offset,
                                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_co_common_block_status_above(top, base, include_base, false,
                                            offset, bytes, pnum, NULL, NULL,
                                            &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret;
    IO_CODE();

    ret = bdrv_common_block_status_above(top, base, include_base, false,
                                         offset, bytes, pnum, NULL, NULL,
                                         &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}
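
/*
 * Illustrative sketch (not from the original file): interpreting the depth
 * returned above for a chain base <- mid <- top.  The helper name and the
 * "top two layers" policy are hypothetical.
 */
static G_GNUC_UNUSED bool
example_prefix_in_top_two(BlockDriverState *top, BlockDriverState *base,
                          int64_t offset, int64_t bytes, int64_t *pnum)
{
    /* Depth 1 is top itself, depth 2 its first backing layer, and so on. */
    int depth = bdrv_is_allocated_above(top, base, false, offset, bytes, pnum);

    return depth > 0 && depth <= 2;
}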

int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_load_vmstate) {
        ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_co_save_vmstate) {
        ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    } else {
        ret = -ENOTSUP;
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);
    IO_CODE();

    return ret < 0 ? ret : size;
}
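
/*
 * Illustrative sketch (not from the original file): the buffer-based
 * wrappers above return the full size on success, so a hypothetical caller
 * can normalize that to 0/-errno like this.
 */
static G_GNUC_UNUSED int
example_store_vmstate_blob(BlockDriverState *bs, const uint8_t *blob,
                           int blob_size)
{
    int ret = bdrv_save_vmstate(bs, blob, 0, blob_size);

    return ret < 0 ? ret : 0; /* ret == blob_size on success */
}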

/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    IO_CODE();
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel.  The caller is not blocked if the acb
 * implements cancel_async; otherwise we do nothing and let the request
 * complete normally.  In either case the completion callback must be
 * called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    IO_CODE();
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;
    IO_CODE();

    assert_bdrv_graph_readable();
    bdrv_inc_in_flight(bs);

    if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush.  Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk.  Returning an error doesn't work,
         * because that would break guests even if the server operates in
         * writethrough mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
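
/*
 * Illustrative sketch (not from the original file): a write followed by a
 * flush so the data is stable on disk before the caller continues.
 * Hypothetical coroutine-context caller holding the graph read lock.
 */
static int coroutine_fn GRAPH_RDLOCK G_GNUC_UNUSED
example_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov)
{
    int ret = bdrv_co_pwritev(child, offset, bytes, qiov, 0);
    if (ret < 0) {
        return ret;
    }
    /* Flushes the whole backing chain of child->bs, as implemented above */
    return bdrv_co_flush(child->bs);
}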

int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }

        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
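
/*
 * Illustrative note (not from the original file): an example of the
 * fragmentation above, with hypothetical limits.  With
 * request_alignment = 512 and pdiscard_alignment = 64 KiB (so
 * align = 64 KiB), a discard of [100 KiB, 200 KiB) is issued as a 28 KiB
 * head request up to the 128 KiB boundary, one 64 KiB fully aligned
 * request, and an 8 KiB tail request.  A driver that silently ignores the
 * unaligned head and tail still drops the aligned middle.
 */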

int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;
    IO_CODE();
    assert_bdrv_graph_readable();

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);
    IO_CODE();

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);
    IO_CODE();

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
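
/*
 * Illustrative sketch (not from the original file): allocating a bounce
 * buffer aligned for @bs with graceful failure.  Buffers from these helpers
 * are released with qemu_vfree(); the name and fill pattern are
 * hypothetical.
 */
static G_GNUC_UNUSED void *
example_alloc_bounce_buffer(BlockDriverState *bs, size_t size)
{
    void *buf = qemu_try_blockalign(bs, size);
    if (!buf) {
        return NULL; /* a caller would turn this into -ENOMEM */
    }
    memset(buf, 0xcc, size); /* hypothetical poison pattern for debugging */
    return buf;
}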

void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_co_io_plug(child->bs);
    }

    if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_co_io_plug) {
            drv->bdrv_co_io_plug(bs);
        }
    }
}

void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;
    IO_CODE();
    assert_bdrv_graph_readable();

    assert(bs->io_plugged);
    if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_co_io_unplug) {
            drv->bdrv_co_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_co_io_unplug(child->bs);
    }
}
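
/*
 * Illustrative sketch (not from the original file): the intended pairing of
 * the two calls above.  Requests submitted between plug and unplug may be
 * batched by the driver; the function name is hypothetical.
 */
static void coroutine_fn GRAPH_RDLOCK G_GNUC_UNUSED
example_batched_submit(BlockDriverState *bs)
{
    bdrv_co_io_plug(bs);
    /* ... submit several requests here; drivers may queue rather than
     * issue them immediately ... */
    bdrv_co_io_unplug(bs); /* flushes anything the driver batched */
}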

/* Helper that undoes bdrv_register_buf() when it fails partway through */
static void bdrv_register_buf_rollback(BlockDriverState *bs,
                                       void *host,
                                       size_t size,
                                       BdrvChild *final_child)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        if (child == final_child) {
            break;
        }

        bdrv_unregister_buf(child->bs, host, size);
    }

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
}

bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
                       Error **errp)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    if (bs->drv && bs->drv->bdrv_register_buf) {
        if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
            return false;
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (!bdrv_register_buf(child->bs, host, size, errp)) {
            bdrv_register_buf_rollback(bs, host, size, child);
            return false;
        }
    }

    return true;
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host, size);
    }
}

static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;
    assert_bdrv_graph_readable();

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(read_flags & BDRV_REQ_NO_WAIT));
    assert(!(write_flags & BDRV_REQ_NO_WAIT));

    if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();

    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
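
/*
 * Illustrative sketch (not from the original file): offloaded copy between
 * two children at the same offset.  The function name is hypothetical;
 * a real caller is expected to fall back to a read/write loop on -ENOTSUP.
 */
static int coroutine_fn GRAPH_RDLOCK G_GNUC_UNUSED
example_clone_region(BdrvChild *src, BdrvChild *dst, int64_t offset,
                     int64_t bytes)
{
    /* Both drivers must implement copy offloading, or -ENOTSUP is
     * returned by the internal helper above. */
    return bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);
}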

static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}

/*
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible.  Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_co_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but
     * bdrv_co_refresh_total_sectors failed, but the latter doesn't affect
     * how we should finish the request.  Pass 0 as the last parameter so
     * that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
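
/*
 * Illustrative sketch (not from the original file): growing an image
 * without preallocation.  With exact=false the node may end up larger than
 * requested; errp is passed straight through.  The function name is
 * hypothetical.
 */
static int coroutine_fn GRAPH_RDLOCK G_GNUC_UNUSED
example_grow_image(BdrvChild *child, int64_t new_size, Error **errp)
{
    return bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_OFF,
                            0, errp);
}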

void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}

int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_preadv_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
                              bool want_zero, int64_t offset, int64_t bytes,
                              int64_t *pnum, int64_t *map,
                              BlockDriverState **file)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_snapshot_block_status) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
                                             pnum, map, file);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pdiscard_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
    bdrv_dec_in_flight(bs);

    return ret;
}