/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};
/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, tran, errp);
            if (*errp) {
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
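
/*
 * Illustrative sketch (not part of the original file): because the flag is a
 * reference count, two independent users can overlap without clobbering each
 * other's state; copy-on-read only turns off after the last disable call.
 *
 *     bdrv_enable_copy_on_read(bs);    // user A: count 0 -> 1
 *     bdrv_enable_copy_on_read(bs);    // user B: count 1 -> 2
 *     bdrv_disable_copy_on_read(bs);   // user A done: count 2 -> 1, still on
 *     bdrv_disable_copy_on_read(bs);   // user B done: count 1 -> 0, now off
 */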
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;
static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}
/*
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
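
/*
 * Usage sketch (illustrative, not part of the original file): a drained
 * section brackets work that must not race with new I/O. The hypothetical
 * do_graph_change() below stands in for whatever the caller needs to do
 * while the node is quiescent.
 *
 *     bdrv_drained_begin(bs);     // quiesce parents, drain in-flight I/O
 *     do_graph_change(bs);        // node is idle here
 *     bdrv_drained_end(bs);       // resume parents and background I/O
 */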
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates.
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the I/O requests to finish
     * may take infinitely long.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay;
     * waiting for the I/O requests to finish
     * may take infinitely long.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
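
/*
 * Pairing sketch (illustrative, not part of the original file): as the
 * comment above bdrv_drain_all_begin() requires, begin/end must be paired,
 * and no new nodes or block jobs may be created in between.
 *
 *     bdrv_drain_all_begin();
 *     // ... inspect or reconfigure existing nodes only ...
 *     bdrv_drain_all_end();
 */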
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/*
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  int64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
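
/*
 * Worked example (illustrative, not part of the original file): for a tracked
 * request with overlap_offset = 4096 and overlap_bytes = 4096, i.e. covering
 * [4096, 8192):
 *
 *     tracked_request_overlaps(req, 8192, 512);  // false: begins at its end
 *     tracked_request_overlaps(req, 0, 4096);    // false: ends at its start
 *     tracked_request_overlaps(req, 8000, 512);  // true: [8000, 8512) crosses 8192
 */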
/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool waited = false;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
        waited = true;
    }

    return waited;
}
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/*
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
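
/*
 * Worked example (illustrative, not part of the original file): with a
 * cluster size of 65536, a request covering [70000, 71000) is widened to
 * whole clusters:
 *
 *     cluster_offset = QEMU_ALIGN_DOWN(70000, 65536)              = 65536
 *     cluster_bytes  = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536
 *
 * so the rounded region [65536, 131072) covers exactly one cluster.
 */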
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    bool waited;

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    waited = bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);

    return waited;
}
static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                                   QEMUIOVector *qiov, size_t qiov_offset,
                                   Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}
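
/*
 * Behaviour sketch (illustrative, not part of the original file):
 *
 *     bdrv_check_request(-1, 512, NULL);   // -EIO: negative offset
 *     bdrv_check_request(0, 512, NULL);    // 0: valid request
 *     bdrv_check_request32(0, (int64_t)INT_MAX + 1, NULL, 0);
 *                          // -EIO: exceeds BDRV_REQUEST_MAX_BYTES
 */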
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
                int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
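
/*
 * Usage sketch (illustrative, not part of the original file): metadata
 * updates that must not be reordered with later writes go through
 * bdrv_pwrite_sync(), e.g. writing a 512-byte header at offset 0:
 *
 *     uint8_t header[512];
 *     // ... fill header ...
 *     ret = bdrv_pwrite_sync(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;   // the write or the flush failed
 *     }
 */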
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~bs->supported_read_flags));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf ... )                               [tail_buf  )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to the sub-buffer, corresponding to the align-sized
 * chunk around the tail, if the tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
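
/*
 * Worked example (illustrative, not part of the original file): with
 * align = 512, offset = 700 and bytes = 100:
 *
 *     head = 700 & 511         = 188
 *     tail = 512 - (800 & 511) = 224
 *     sum  = 188 + 100 + 224   = 512
 *
 * sum does not exceed align, so buf_len = align = 512 and merge_reads is
 * true: head, data and tail all fit in one aligned chunk and are read back
 * with a single aligned request into one buffer.
 */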
static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
                                 bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded)
{
    int ret;

    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        if (padded) {
            *padded = false;
        }
        return 0;
    }

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
                                   pad->tail);
    if (ret < 0) {
        bdrv_padding_destroy(pad);
        return ret;
    }

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;
    if (padded) {
        *padded = true;
    }

    return 0;
}
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, int64_t bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero request is nonsense. Even if the driver gives
         * special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass such a request
         * to the driver due to request_alignment.
         *
         * Still, there is no reason to return an error if someone does an
         * unaligned zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
                           NULL);
    if (ret < 0) {
        return ret;
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);

    return ret;
}
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    bdrv_check_request(offset, bytes, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
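
/*
 * Worked example (illustrative, not part of the original file): zeroing
 * [700, 2400) with alignment = 512 proceeds in three steps:
 *
 *     1. head fragment  [700, 1024)  - 324 bytes up to the first boundary
 *     2. aligned middle [1024, 2048) - the bulk, a multiple of 512
 *     3. tail fragment  [2048, 2400) - 352 bytes after the last boundary
 *
 * Only the aligned middle can use the driver's efficient zeroing path at
 * full granularity; the unaligned fragments may fall back to the bounce
 * buffer.
 */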
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bs->read_only) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
            return -EBUSY;
        }

        bdrv_wait_serialising_requests_locked(req);
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
                                                req);
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (req->bytes) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
2148 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
2149 int64_t offset,
2150 int64_t bytes,
2151 BdrvRequestFlags flags,
2152 BdrvTrackedRequest *req)
2154 BlockDriverState *bs = child->bs;
2155 QEMUIOVector local_qiov;
2156 uint64_t align = bs->bl.request_alignment;
2157 int ret = 0;
2158 bool padding;
2159 BdrvRequestPadding pad;
2161 padding = bdrv_init_padding(bs, offset, bytes, &pad);
2162 if (padding) {
2163 bdrv_make_request_serialising(req, align);
2165 bdrv_padding_rmw_read(child, req, &pad, true);
2167 if (pad.head || pad.merge_reads) {
2168 int64_t aligned_offset = offset & ~(align - 1);
2169 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2171 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2172 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2173 align, &local_qiov, 0,
2174 flags & ~BDRV_REQ_ZERO_WRITE);
2175 if (ret < 0 || pad.merge_reads) {
2176 /* Error or all work is done */
2177 goto out;
2179 offset += write_bytes - pad.head;
2180 bytes -= write_bytes - pad.head;
2184 assert(!bytes || (offset & (align - 1)) == 0);
2185 if (bytes >= align) {
2186 /* Write the aligned part in the middle. */
2187 int64_t aligned_bytes = bytes & ~(align - 1);
2188 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2189 NULL, 0, flags);
2190 if (ret < 0) {
2191 goto out;
2193 bytes -= aligned_bytes;
2194 offset += aligned_bytes;
2197 assert(!bytes || (offset & (align - 1)) == 0);
2198 if (bytes) {
2199 assert(align == pad.tail + bytes);
2201 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2202 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2203 &local_qiov, 0,
2204 flags & ~BDRV_REQ_ZERO_WRITE);
2207 out:
2208 bdrv_padding_destroy(&pad);
2210 return ret;
2214 * Handle a write request in coroutine context
2216 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2217 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2218 BdrvRequestFlags flags)
2220 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2223 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2224 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2225 BdrvRequestFlags flags)
2227 BlockDriverState *bs = child->bs;
2228 BdrvTrackedRequest req;
2229 uint64_t align = bs->bl.request_alignment;
2230 BdrvRequestPadding pad;
2231 int ret;
2232 bool padded = false;
2234 trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2236 if (!bdrv_is_inserted(bs)) {
2237 return -ENOMEDIUM;
2240 ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2241 if (ret < 0) {
2242 return ret;
2245 /* If the request is misaligned then we can't make it efficient */
2246 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2247 !QEMU_IS_ALIGNED(offset | bytes, align))
2249 return -ENOTSUP;
2252 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2254 * Padding a zero-length request makes no sense. Even if the driver gives
2255 * special meaning to zero length (like qcow2_co_pwritev_compressed_part),
2256 * we can't pass one through to the driver because of request_alignment.
2258 * Still, there is no reason to return an error if someone does an
2259 * unaligned zero-length write occasionally.
2261 return 0;
2264 if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2266 * Pad the request for the following read-modify-write cycle.
2267 * bdrv_co_do_zero_pwritev() does its own alignment, so we only pad
2268 * here when the ZERO flag is absent.
2270 ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
2271 &padded);
2272 if (ret < 0) {
2273 return ret;
2277 bdrv_inc_in_flight(bs);
2278 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2280 if (flags & BDRV_REQ_ZERO_WRITE) {
2281 assert(!padded);
2282 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2283 goto out;
2286 if (padded) {
2288 * Request was unaligned to request_alignment and therefore
2289 * padded. We are going to do read-modify-write, and must
2290 * serialize the request to prevent interactions of the
2291 * widened region with other transactions.
2293 bdrv_make_request_serialising(&req, align);
2294 bdrv_padding_rmw_read(child, &req, &pad, false);
2297 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2298 qiov, qiov_offset, flags);
2300 bdrv_padding_destroy(&pad);
2302 out:
2303 tracked_request_end(&req);
2304 bdrv_dec_in_flight(bs);
2306 return ret;
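/*
 * Example (illustrative sketch, not compiled): a coroutine writing a
 * 4 KiB buffer through a BdrvChild. The helper name is hypothetical;
 * qemu_blockalign() (defined below) keeps the bounce buffer aligned for
 * drivers that need O_DIRECT-compatible memory.
 *
 *   static int coroutine_fn example_pwrite(BdrvChild *child, int64_t offset)
 *   {
 *       size_t len = 4096;
 *       void *buf = qemu_blockalign(child->bs, len);
 *       QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);
 *       int ret;
 *
 *       memset(buf, 0xcc, len);
 *       ret = bdrv_co_pwritev(child, offset, len, &qiov, 0);
 *       qemu_vfree(buf);
 *       return ret;
 *   }
 */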
2309 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2310 int64_t bytes, BdrvRequestFlags flags)
2312 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2314 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2315 flags &= ~BDRV_REQ_MAY_UNMAP;
2318 return bdrv_co_pwritev(child, offset, bytes, NULL,
2319 BDRV_REQ_ZERO_WRITE | flags);
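/*
 * Example (illustrative sketch, not compiled): zeroing a region while
 * allowing the driver to deallocate it. BDRV_REQ_MAY_UNMAP is stripped
 * above when the image was opened without BDRV_O_UNMAP, so callers can
 * pass it unconditionally. The helper name is hypothetical.
 *
 *   static int coroutine_fn example_zero(BdrvChild *child,
 *                                        int64_t offset, int64_t bytes)
 *   {
 *       return bdrv_co_pwrite_zeroes(child, offset, bytes,
 *                                    BDRV_REQ_MAY_UNMAP);
 *   }
 */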
2323 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
2325 int bdrv_flush_all(void)
2327 BdrvNextIterator it;
2328 BlockDriverState *bs = NULL;
2329 int result = 0;
2332 * The bdrv queue is managed by record/replay;
2333 * creating a new flush request for stopping
2334 * the VM may break determinism
2336 if (replay_events_enabled()) {
2337 return result;
2340 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2341 AioContext *aio_context = bdrv_get_aio_context(bs);
2342 int ret;
2344 aio_context_acquire(aio_context);
2345 ret = bdrv_flush(bs);
2346 if (ret < 0 && !result) {
2347 result = ret;
2349 aio_context_release(aio_context);
2352 return result;
2356 * Returns the allocation status of the specified byte range.
2357 * Drivers not implementing the functionality are assumed to not support
2358 * backing files, hence all their sectors are reported as allocated.
2360 * If 'want_zero' is true, the caller is querying for mapping
2361 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2362 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2363 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2365 * If 'offset' is beyond the end of the disk image the return value is
2366 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2368 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2369 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2370 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2372 * 'pnum' is set to the number of bytes (including and immediately
2373 * following the specified offset) that are easily known to be in the
2374 * same allocated/unallocated state. Note that a second call starting
2375 * at the original offset plus returned pnum may have the same status.
2376 * The returned value is non-zero on success except at end-of-file.
2378 * Returns negative errno on failure. Otherwise, if the
2379 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2380 * set to the host mapping and BDS corresponding to the guest offset.
2382 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2383 bool want_zero,
2384 int64_t offset, int64_t bytes,
2385 int64_t *pnum, int64_t *map,
2386 BlockDriverState **file)
2388 int64_t total_size;
2389 int64_t n; /* bytes */
2390 int ret;
2391 int64_t local_map = 0;
2392 BlockDriverState *local_file = NULL;
2393 int64_t aligned_offset, aligned_bytes;
2394 uint32_t align;
2395 bool has_filtered_child;
2397 assert(pnum);
2398 *pnum = 0;
2399 total_size = bdrv_getlength(bs);
2400 if (total_size < 0) {
2401 ret = total_size;
2402 goto early_out;
2405 if (offset >= total_size) {
2406 ret = BDRV_BLOCK_EOF;
2407 goto early_out;
2409 if (!bytes) {
2410 ret = 0;
2411 goto early_out;
2414 n = total_size - offset;
2415 if (n < bytes) {
2416 bytes = n;
2419 /* Must be non-NULL or bdrv_getlength() would have failed */
2420 assert(bs->drv);
2421 has_filtered_child = bdrv_filter_child(bs);
2422 if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2423 *pnum = bytes;
2424 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2425 if (offset + bytes == total_size) {
2426 ret |= BDRV_BLOCK_EOF;
2428 if (bs->drv->protocol_name) {
2429 ret |= BDRV_BLOCK_OFFSET_VALID;
2430 local_map = offset;
2431 local_file = bs;
2433 goto early_out;
2436 bdrv_inc_in_flight(bs);
2438 /* Round out to request_alignment boundaries */
2439 align = bs->bl.request_alignment;
2440 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2441 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2443 if (bs->drv->bdrv_co_block_status) {
2444 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2445 aligned_bytes, pnum, &local_map,
2446 &local_file);
2447 } else {
2448 /* Default code for filters */
2450 local_file = bdrv_filter_bs(bs);
2451 assert(local_file);
2453 *pnum = aligned_bytes;
2454 local_map = aligned_offset;
2455 ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2457 if (ret < 0) {
2458 *pnum = 0;
2459 goto out;
2463 * The driver's result must be a non-zero multiple of request_alignment.
2464 * Clamp pnum and adjust map to original request.
2466 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2467 align > offset - aligned_offset);
2468 if (ret & BDRV_BLOCK_RECURSE) {
2469 assert(ret & BDRV_BLOCK_DATA);
2470 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2471 assert(!(ret & BDRV_BLOCK_ZERO));
2474 *pnum -= offset - aligned_offset;
2475 if (*pnum > bytes) {
2476 *pnum = bytes;
2478 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2479 local_map += offset - aligned_offset;
2482 if (ret & BDRV_BLOCK_RAW) {
2483 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2484 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2485 *pnum, pnum, &local_map, &local_file);
2486 goto out;
2489 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2490 ret |= BDRV_BLOCK_ALLOCATED;
2491 } else if (bs->drv->supports_backing) {
2492 BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2494 if (!cow_bs) {
2495 ret |= BDRV_BLOCK_ZERO;
2496 } else if (want_zero) {
2497 int64_t size2 = bdrv_getlength(cow_bs);
2499 if (size2 >= 0 && offset >= size2) {
2500 ret |= BDRV_BLOCK_ZERO;
2505 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2506 local_file && local_file != bs &&
2507 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2508 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2509 int64_t file_pnum;
2510 int ret2;
2512 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2513 *pnum, &file_pnum, NULL, NULL);
2514 if (ret2 >= 0) {
2515 /* Ignore errors. This just provides extra information; it
2516 * is useful but not necessary.
2518 if (ret2 & BDRV_BLOCK_EOF &&
2519 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2521 * It is valid for the format block driver to read
2522 * beyond the end of the underlying file's current
2523 * size; such areas read as zero.
2525 ret |= BDRV_BLOCK_ZERO;
2526 } else {
2527 /* Limit request to the range reported by the protocol driver */
2528 *pnum = file_pnum;
2529 ret |= (ret2 & BDRV_BLOCK_ZERO);
2534 out:
2535 bdrv_dec_in_flight(bs);
2536 if (ret >= 0 && offset + *pnum == total_size) {
2537 ret |= BDRV_BLOCK_EOF;
2539 early_out:
2540 if (file) {
2541 *file = local_file;
2543 if (map) {
2544 *map = local_map;
2546 return ret;
2549 int coroutine_fn
2550 bdrv_co_common_block_status_above(BlockDriverState *bs,
2551 BlockDriverState *base,
2552 bool include_base,
2553 bool want_zero,
2554 int64_t offset,
2555 int64_t bytes,
2556 int64_t *pnum,
2557 int64_t *map,
2558 BlockDriverState **file,
2559 int *depth)
2561 int ret;
2562 BlockDriverState *p;
2563 int64_t eof = 0;
2564 int dummy;
2566 assert(!include_base || base); /* Can't include NULL base */
2568 if (!depth) {
2569 depth = &dummy;
2571 *depth = 0;
2573 if (!include_base && bs == base) {
2574 *pnum = bytes;
2575 return 0;
2578 ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
2579 ++*depth;
2580 if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2581 return ret;
2584 if (ret & BDRV_BLOCK_EOF) {
2585 eof = offset + *pnum;
2588 assert(*pnum <= bytes);
2589 bytes = *pnum;
2591 for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2592 p = bdrv_filter_or_cow_bs(p))
2594 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2595 file);
2596 ++*depth;
2597 if (ret < 0) {
2598 return ret;
2600 if (*pnum == 0) {
2602 * The top layer deferred to this layer, and because this layer is
2603 * short, any zeroes that we synthesize beyond EOF behave as if they
2604 * were allocated at this layer.
2606 * We don't include BDRV_BLOCK_EOF in ret, as the upper layer may be
2607 * larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
2608 * function, see below.
2610 assert(ret & BDRV_BLOCK_EOF);
2611 *pnum = bytes;
2612 if (file) {
2613 *file = p;
2615 ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2616 break;
2618 if (ret & BDRV_BLOCK_ALLOCATED) {
2620 * We've found the node and its status; stop searching.
2622 * Drop BDRV_BLOCK_EOF, as it does not apply to the upper layer, which
2623 * may be larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
2624 * function, see below.
2626 ret &= ~BDRV_BLOCK_EOF;
2627 break;
2630 if (p == base) {
2631 assert(include_base);
2632 break;
2636 * OK, the [offset, offset + *pnum) region is unallocated on this layer;
2637 * continue diving down the chain.
2639 assert(*pnum <= bytes);
2640 bytes = *pnum;
2643 if (offset + *pnum == eof) {
2644 ret |= BDRV_BLOCK_EOF;
2647 return ret;
2650 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2651 int64_t offset, int64_t bytes, int64_t *pnum,
2652 int64_t *map, BlockDriverState **file)
2654 return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
2655 pnum, map, file, NULL);
2658 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2659 int64_t *pnum, int64_t *map, BlockDriverState **file)
2661 return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
2662 offset, bytes, pnum, map, file);
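/*
 * Example (illustrative sketch, not compiled): enumerating an image's
 * extents with bdrv_block_status(). Per the contract above, *pnum is
 * only 0 past end-of-file, so the loop always makes progress. The helper
 * name is hypothetical.
 *
 *   static void example_dump_extents(BlockDriverState *bs, int64_t size)
 *   {
 *       int64_t offset = 0;
 *
 *       while (offset < size) {
 *           int64_t pnum, map;
 *           BlockDriverState *file;
 *           int ret = bdrv_block_status(bs, offset, size - offset,
 *                                       &pnum, &map, &file);
 *           if (ret < 0 || pnum == 0) {
 *               break;
 *           }
 *           printf("%" PRId64 "+%" PRId64 ": %s\n", offset, pnum,
 *                  ret & BDRV_BLOCK_DATA ? "data" : "zero/unallocated");
 *           offset += pnum;
 *       }
 *   }
 */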
2666 * Check @bs (and its backing chain) to see if the range defined
2667 * by @offset and @bytes is known to read as zeroes.
2668 * Return 1 if that is the case, 0 otherwise and -errno on error.
2669 * This test is meant to be fast rather than accurate, so a result of 0
2670 * does not guarantee that the range contains non-zero data.
2672 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2673 int64_t bytes)
2675 int ret;
2676 int64_t pnum = bytes;
2678 if (!bytes) {
2679 return 1;
2682 ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
2683 bytes, &pnum, NULL, NULL, NULL);
2685 if (ret < 0) {
2686 return ret;
2689 return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
2692 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2693 int64_t bytes, int64_t *pnum)
2695 int ret;
2696 int64_t dummy;
2698 ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
2699 bytes, pnum ? pnum : &dummy, NULL,
2700 NULL, NULL);
2701 if (ret < 0) {
2702 return ret;
2704 return !!(ret & BDRV_BLOCK_ALLOCATED);
2708 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2710 * Return a positive depth if (a prefix of) the given range is allocated
2711 * in any image between BASE and TOP (BASE is only included if include_base
2712 * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
2713 * BASE can be NULL to check if the given offset is allocated in any
2714 * image of the chain. Return 0 otherwise, or negative errno on
2715 * failure.
2717 * 'pnum' is set to the number of bytes (including and immediately
2718 * following the specified offset) that are known to be in the same
2719 * allocated/unallocated state. Note that a subsequent call starting
2720 * at 'offset + *pnum' may return the same allocation status (in other
2721 * words, the result is not necessarily the maximum possible range);
2722 * but 'pnum' will only be 0 when end of file is reached.
2724 int bdrv_is_allocated_above(BlockDriverState *top,
2725 BlockDriverState *base,
2726 bool include_base, int64_t offset,
2727 int64_t bytes, int64_t *pnum)
2729 int depth;
2730 int ret = bdrv_common_block_status_above(top, base, include_base, false,
2731 offset, bytes, pnum, NULL, NULL,
2732 &depth);
2733 if (ret < 0) {
2734 return ret;
2737 if (ret & BDRV_BLOCK_ALLOCATED) {
2738 return depth;
2740 return 0;
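/*
 * Worked example for the depth semantics above: with the chain
 * [BASE] <- [MID] <- [TOP], bdrv_is_allocated_above(TOP, BASE, false, ...)
 * returns 1 if the range is allocated in TOP, 2 if it is allocated only
 * in MID, and 0 if it is allocated only in BASE (or nowhere), because
 * include_base == false excludes BASE itself from the search.
 */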
2743 int coroutine_fn
2744 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2746 BlockDriver *drv = bs->drv;
2747 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2748 int ret = -ENOTSUP;
2750 if (!drv) {
2751 return -ENOMEDIUM;
2754 bdrv_inc_in_flight(bs);
2756 if (drv->bdrv_load_vmstate) {
2757 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2758 } else if (child_bs) {
2759 ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2762 bdrv_dec_in_flight(bs);
2764 return ret;
2767 int coroutine_fn
2768 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2770 BlockDriver *drv = bs->drv;
2771 BlockDriverState *child_bs = bdrv_primary_bs(bs);
2772 int ret = -ENOTSUP;
2774 if (!drv) {
2775 return -ENOMEDIUM;
2778 bdrv_inc_in_flight(bs);
2780 if (drv->bdrv_save_vmstate) {
2781 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2782 } else if (child_bs) {
2783 ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2786 bdrv_dec_in_flight(bs);
2788 return ret;
2791 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2792 int64_t pos, int size)
2794 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2795 int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2797 return ret < 0 ? ret : size;
2800 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2801 int64_t pos, int size)
2803 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2804 int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2806 return ret < 0 ? ret : size;
2809 /**************************************************************/
2810 /* async I/Os */
2812 void bdrv_aio_cancel(BlockAIOCB *acb)
2814 qemu_aio_ref(acb);
2815 bdrv_aio_cancel_async(acb);
2816 while (acb->refcnt > 1) {
2817 if (acb->aiocb_info->get_aio_context) {
2818 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2819 } else if (acb->bs) {
2820 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2821 * assert that we're not using an I/O thread. Thread-safe
2822 * code should use bdrv_aio_cancel_async exclusively.
2824 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2825 aio_poll(bdrv_get_aio_context(acb->bs), true);
2826 } else {
2827 abort();
2830 qemu_aio_unref(acb);
2833 /* Async version of aio cancel. The caller is not blocked if the acb implements
2834 * cancel_async; otherwise we do nothing and let the request complete normally.
2835 * In either case the completion callback must be called. */
2836 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2838 if (acb->aiocb_info->cancel_async) {
2839 acb->aiocb_info->cancel_async(acb);
2843 /**************************************************************/
2844 /* Coroutine block device emulation */
2846 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2848 BdrvChild *primary_child = bdrv_primary_child(bs);
2849 BdrvChild *child;
2850 int current_gen;
2851 int ret = 0;
2853 bdrv_inc_in_flight(bs);
2855 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2856 bdrv_is_sg(bs)) {
2857 goto early_exit;
2860 qemu_co_mutex_lock(&bs->reqs_lock);
2861 current_gen = qatomic_read(&bs->write_gen);
2863 /* Wait until any previous flushes are completed */
2864 while (bs->active_flush_req) {
2865 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2868 /* Flushes reach this point in nondecreasing current_gen order. */
2869 bs->active_flush_req = true;
2870 qemu_co_mutex_unlock(&bs->reqs_lock);
2872 /* Write back all layers by calling one driver function */
2873 if (bs->drv->bdrv_co_flush) {
2874 ret = bs->drv->bdrv_co_flush(bs);
2875 goto out;
2878 /* Write back cached data to the OS even with cache=unsafe */
2879 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2880 if (bs->drv->bdrv_co_flush_to_os) {
2881 ret = bs->drv->bdrv_co_flush_to_os(bs);
2882 if (ret < 0) {
2883 goto out;
2887 /* But don't actually force it to the disk with cache=unsafe */
2888 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2889 goto flush_children;
2892 /* Check if we really need to flush anything */
2893 if (bs->flushed_gen == current_gen) {
2894 goto flush_children;
2897 BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2898 if (!bs->drv) {
2899 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2900 * (even in case of apparent success) */
2901 ret = -ENOMEDIUM;
2902 goto out;
2904 if (bs->drv->bdrv_co_flush_to_disk) {
2905 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2906 } else if (bs->drv->bdrv_aio_flush) {
2907 BlockAIOCB *acb;
2908 CoroutineIOCompletion co = {
2909 .coroutine = qemu_coroutine_self(),
2912 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2913 if (acb == NULL) {
2914 ret = -EIO;
2915 } else {
2916 qemu_coroutine_yield();
2917 ret = co.ret;
2919 } else {
2921 * Some block drivers always operate in either writethrough or unsafe
2922 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2923 * know how the server works (because the behaviour is hardcoded or
2924 * depends on server-side configuration), so we can't ensure that
2925 * everything is safe on disk. Returning an error doesn't work because
2926 * that would break guests even if the server operates in writethrough
2927 * mode.
2929 * Let's hope the user knows what they're doing.
2931 ret = 0;
2934 if (ret < 0) {
2935 goto out;
2938 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2939 * in the case of cache=unsafe, so there are no useless flushes.
2941 flush_children:
2942 ret = 0;
2943 QLIST_FOREACH(child, &bs->children, next) {
2944 if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
2945 int this_child_ret = bdrv_co_flush(child->bs);
2946 if (!ret) {
2947 ret = this_child_ret;
2952 out:
2953 /* Notify any pending flushes that we have completed */
2954 if (ret == 0) {
2955 bs->flushed_gen = current_gen;
2958 qemu_co_mutex_lock(&bs->reqs_lock);
2959 bs->active_flush_req = false;
2960 /* Return value is ignored - it's ok if wait queue is empty */
2961 qemu_co_queue_next(&bs->flush_queue);
2962 qemu_co_mutex_unlock(&bs->reqs_lock);
2964 early_exit:
2965 bdrv_dec_in_flight(bs);
2966 return ret;
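/*
 * Example (illustrative sketch, not compiled): a caller that needs
 * durability without relying on BDRV_REQ_FUA can pair a write with an
 * explicit flush, mirroring the flush-based FUA emulation in
 * bdrv_aligned_pwritev(). The helper name is hypothetical.
 *
 *   static int coroutine_fn example_durable_write(BdrvChild *child,
 *                                                 int64_t offset,
 *                                                 QEMUIOVector *qiov)
 *   {
 *       int ret = bdrv_co_pwritev(child, offset, qiov->size, qiov, 0);
 *       if (ret < 0) {
 *           return ret;
 *       }
 *       return bdrv_co_flush(child->bs);
 *   }
 */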
2969 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2970 int64_t bytes)
2972 BdrvTrackedRequest req;
2973 int max_pdiscard, ret;
2974 int head, tail, align;
2975 BlockDriverState *bs = child->bs;
2977 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
2978 return -ENOMEDIUM;
2981 if (bdrv_has_readonly_bitmaps(bs)) {
2982 return -EPERM;
2985 ret = bdrv_check_request(offset, bytes, NULL);
2986 if (ret < 0) {
2987 return ret;
2990 /* Do nothing if disabled. */
2991 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2992 return 0;
2995 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2996 return 0;
2999 /* Discard is advisory, but some devices track and coalesce
3000 * unaligned requests, so we must pass everything down rather than
3001 * round here. Still, most devices will just silently ignore
3002 * unaligned requests (by returning -ENOTSUP), so we must fragment
3003 * the request accordingly. */
3004 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3005 assert(align % bs->bl.request_alignment == 0);
3006 head = offset % align;
3007 tail = (offset + bytes) % align;
3009 bdrv_inc_in_flight(bs);
3010 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
3012 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3013 if (ret < 0) {
3014 goto out;
3017 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
3018 align);
3019 assert(max_pdiscard >= bs->bl.request_alignment);
3021 while (bytes > 0) {
3022 int64_t num = bytes;
3024 if (head) {
3025 /* Make small requests to get to alignment boundaries. */
3026 num = MIN(bytes, align - head);
3027 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
3028 num %= bs->bl.request_alignment;
3030 head = (head + num) % align;
3031 assert(num < max_pdiscard);
3032 } else if (tail) {
3033 if (num > align) {
3034 /* Shorten the request to the last aligned cluster. */
3035 num -= tail;
3036 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
3037 tail > bs->bl.request_alignment) {
3038 tail %= bs->bl.request_alignment;
3039 num -= tail;
3042 /* limit request size */
3043 if (num > max_pdiscard) {
3044 num = max_pdiscard;
3047 if (!bs->drv) {
3048 ret = -ENOMEDIUM;
3049 goto out;
3051 if (bs->drv->bdrv_co_pdiscard) {
3052 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
3053 } else {
3054 BlockAIOCB *acb;
3055 CoroutineIOCompletion co = {
3056 .coroutine = qemu_coroutine_self(),
3059 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
3060 bdrv_co_io_em_complete, &co);
3061 if (acb == NULL) {
3062 ret = -EIO;
3063 goto out;
3064 } else {
3065 qemu_coroutine_yield();
3066 ret = co.ret;
3069 if (ret && ret != -ENOTSUP) {
3070 goto out;
3073 offset += num;
3074 bytes -= num;
3076 ret = 0;
3077 out:
3078 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3079 tracked_request_end(&req);
3080 bdrv_dec_in_flight(bs);
3081 return ret;
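/*
 * Worked example for the head/tail fragmentation in bdrv_co_pdiscard()
 * above (illustrative numbers): with pdiscard_alignment = 64 KiB and
 * request_alignment = 512, a discard at offset = 100 KiB for
 * bytes = 200 KiB has head = 36 KiB and tail = 44 KiB. The loop first
 * issues a 28 KiB request (align - head) to reach the 128 KiB boundary,
 * then one aligned 128 KiB request (subject to max_pdiscard), and
 * finally the 44 KiB tail.
 */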
3084 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3086 BlockDriver *drv = bs->drv;
3087 CoroutineIOCompletion co = {
3088 .coroutine = qemu_coroutine_self(),
3090 BlockAIOCB *acb;
3092 bdrv_inc_in_flight(bs);
3093 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3094 co.ret = -ENOTSUP;
3095 goto out;
3098 if (drv->bdrv_co_ioctl) {
3099 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3100 } else {
3101 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3102 if (!acb) {
3103 co.ret = -ENOTSUP;
3104 goto out;
3106 qemu_coroutine_yield();
3108 out:
3109 bdrv_dec_in_flight(bs);
3110 return co.ret;
3113 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3115 return qemu_memalign(bdrv_opt_mem_align(bs), size);
3118 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3120 return memset(qemu_blockalign(bs, size), 0, size);
3123 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3125 size_t align = bdrv_opt_mem_align(bs);
3127 /* Ensure that NULL is never returned on success */
3128 assert(align > 0);
3129 if (size == 0) {
3130 size = align;
3133 return qemu_try_memalign(align, size);
3136 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3138 void *mem = qemu_try_blockalign(bs, size);
3140 if (mem) {
3141 memset(mem, 0, size);
3144 return mem;
3148 * Check if all memory in this vector is sector aligned.
3150 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
3152 int i;
3153 size_t alignment = bdrv_min_mem_align(bs);
3155 for (i = 0; i < qiov->niov; i++) {
3156 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
3157 return false;
3159 if (qiov->iov[i].iov_len % alignment) {
3160 return false;
3164 return true;
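/*
 * Example (illustrative sketch, not compiled): using the check above to
 * bounce an unaligned vector through qemu_try_blockalign()ed memory
 * before submission. qemu_iovec_to_buf() and qemu_iovec_init_buf() are
 * existing iov helpers; the function name is hypothetical.
 *
 *   static int coroutine_fn example_write_bounced(BdrvChild *child,
 *                                                 int64_t offset,
 *                                                 QEMUIOVector *qiov)
 *   {
 *       void *buf;
 *       QEMUIOVector bounce;
 *       int ret;
 *
 *       if (bdrv_qiov_is_aligned(child->bs, qiov)) {
 *           return bdrv_co_pwritev(child, offset, qiov->size, qiov, 0);
 *       }
 *       buf = qemu_try_blockalign(child->bs, qiov->size);
 *       if (!buf) {
 *           return -ENOMEM;
 *       }
 *       qemu_iovec_to_buf(qiov, 0, buf, qiov->size);
 *       qemu_iovec_init_buf(&bounce, buf, qiov->size);
 *       ret = bdrv_co_pwritev(child, offset, qiov->size, &bounce, 0);
 *       qemu_vfree(buf);
 *       return ret;
 *   }
 */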
3167 void bdrv_add_before_write_notifier(BlockDriverState *bs,
3168 NotifierWithReturn *notifier)
3170 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
3173 void bdrv_io_plug(BlockDriverState *bs)
3175 BdrvChild *child;
3177 QLIST_FOREACH(child, &bs->children, next) {
3178 bdrv_io_plug(child->bs);
3181 if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
3182 BlockDriver *drv = bs->drv;
3183 if (drv && drv->bdrv_io_plug) {
3184 drv->bdrv_io_plug(bs);
3189 void bdrv_io_unplug(BlockDriverState *bs)
3191 BdrvChild *child;
3193 assert(bs->io_plugged);
3194 if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
3195 BlockDriver *drv = bs->drv;
3196 if (drv && drv->bdrv_io_unplug) {
3197 drv->bdrv_io_unplug(bs);
3201 QLIST_FOREACH(child, &bs->children, next) {
3202 bdrv_io_unplug(child->bs);
3206 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
3208 BdrvChild *child;
3210 if (bs->drv && bs->drv->bdrv_register_buf) {
3211 bs->drv->bdrv_register_buf(bs, host, size);
3213 QLIST_FOREACH(child, &bs->children, next) {
3214 bdrv_register_buf(child->bs, host, size);
3218 void bdrv_unregister_buf(BlockDriverState *bs, void *host)
3220 BdrvChild *child;
3222 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3223 bs->drv->bdrv_unregister_buf(bs, host);
3225 QLIST_FOREACH(child, &bs->children, next) {
3226 bdrv_unregister_buf(child->bs, host);
3230 static int coroutine_fn bdrv_co_copy_range_internal(
3231 BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3232 int64_t dst_offset, int64_t bytes,
3233 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3234 bool recurse_src)
3236 BdrvTrackedRequest req;
3237 int ret;
3239 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3240 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3241 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3243 if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
3244 return -ENOMEDIUM;
3246 ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3247 if (ret) {
3248 return ret;
3250 if (write_flags & BDRV_REQ_ZERO_WRITE) {
3251 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3254 if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
3255 return -ENOMEDIUM;
3257 ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3258 if (ret) {
3259 return ret;
3262 if (!src->bs->drv->bdrv_co_copy_range_from
3263 || !dst->bs->drv->bdrv_co_copy_range_to
3264 || src->bs->encrypted || dst->bs->encrypted) {
3265 return -ENOTSUP;
3268 if (recurse_src) {
3269 bdrv_inc_in_flight(src->bs);
3270 tracked_request_begin(&req, src->bs, src_offset, bytes,
3271 BDRV_TRACKED_READ);
3273 /* BDRV_REQ_SERIALISING is only for write operations */
3274 assert(!(read_flags & BDRV_REQ_SERIALISING));
3275 bdrv_wait_serialising_requests(&req);
3277 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3278 src, src_offset,
3279 dst, dst_offset,
3280 bytes,
3281 read_flags, write_flags);
3283 tracked_request_end(&req);
3284 bdrv_dec_in_flight(src->bs);
3285 } else {
3286 bdrv_inc_in_flight(dst->bs);
3287 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3288 BDRV_TRACKED_WRITE);
3289 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3290 write_flags);
3291 if (!ret) {
3292 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3293 src, src_offset,
3294 dst, dst_offset,
3295 bytes,
3296 read_flags, write_flags);
3298 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3299 tracked_request_end(&req);
3300 bdrv_dec_in_flight(dst->bs);
3303 return ret;
3306 /* Copy range from @src to @dst.
3308 * See the comment on bdrv_co_copy_range for the parameter and return value
3309 * semantics. */
3310 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3311 BdrvChild *dst, int64_t dst_offset,
3312 int64_t bytes,
3313 BdrvRequestFlags read_flags,
3314 BdrvRequestFlags write_flags)
3316 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3317 read_flags, write_flags);
3318 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3319 bytes, read_flags, write_flags, true);
3322 /* Copy range from @src to @dst.
3324 * See the comment on bdrv_co_copy_range for the parameter and return value
3325 * semantics. */
3326 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3327 BdrvChild *dst, int64_t dst_offset,
3328 int64_t bytes,
3329 BdrvRequestFlags read_flags,
3330 BdrvRequestFlags write_flags)
3332 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3333 read_flags, write_flags);
3334 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3335 bytes, read_flags, write_flags, false);
3338 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3339 BdrvChild *dst, int64_t dst_offset,
3340 int64_t bytes, BdrvRequestFlags read_flags,
3341 BdrvRequestFlags write_flags)
3343 return bdrv_co_copy_range_from(src, src_offset,
3344 dst, dst_offset,
3345 bytes, read_flags, write_flags);
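/*
 * Example (illustrative sketch, not compiled): attempting an offloaded
 * copy between two children. A plain -ENOTSUP from the drivers means the
 * caller must fall back to copying via bdrv_co_preadv() and
 * bdrv_co_pwritev() itself. The helper name is hypothetical.
 *
 *   static int coroutine_fn example_clone(BdrvChild *src, BdrvChild *dst,
 *                                         int64_t bytes)
 *   {
 *       return bdrv_co_copy_range(src, 0, dst, 0, bytes, 0, 0);
 *   }
 */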
3348 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3350 BdrvChild *c;
3351 QLIST_FOREACH(c, &bs->parents, next_parent) {
3352 if (c->klass->resize) {
3353 c->klass->resize(c);
3359 * Truncate file to 'offset' bytes (needed only for file protocols)
3361 * If 'exact' is true, the file must be resized to exactly the given
3362 * 'offset'. Otherwise, it is sufficient for the node to be at least
3363 * 'offset' bytes in length.
3365 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3366 PreallocMode prealloc, BdrvRequestFlags flags,
3367 Error **errp)
3369 BlockDriverState *bs = child->bs;
3370 BdrvChild *filtered, *backing;
3371 BlockDriver *drv = bs->drv;
3372 BdrvTrackedRequest req;
3373 int64_t old_size, new_bytes;
3374 int ret;
3377 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3378 if (!drv) {
3379 error_setg(errp, "No medium inserted");
3380 return -ENOMEDIUM;
3382 if (offset < 0) {
3383 error_setg(errp, "Image size cannot be negative");
3384 return -EINVAL;
3387 ret = bdrv_check_request(offset, 0, errp);
3388 if (ret < 0) {
3389 return ret;
3392 old_size = bdrv_getlength(bs);
3393 if (old_size < 0) {
3394 error_setg_errno(errp, -old_size, "Failed to get old image size");
3395 return old_size;
3398 if (offset > old_size) {
3399 new_bytes = offset - old_size;
3400 } else {
3401 new_bytes = 0;
3404 bdrv_inc_in_flight(bs);
3405 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3406 BDRV_TRACKED_TRUNCATE);
3408 /* If we are growing the image and potentially using preallocation for the
3409 * new area, we need to make sure that no write requests are made to it
3410 * concurrently or they might be overwritten by preallocation. */
3411 if (new_bytes) {
3412 bdrv_make_request_serialising(&req, 1);
3414 if (bs->read_only) {
3415 error_setg(errp, "Image is read-only");
3416 ret = -EACCES;
3417 goto out;
3419 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3421 if (ret < 0) {
3422 error_setg_errno(errp, -ret,
3423 "Failed to prepare request for truncation");
3424 goto out;
3427 filtered = bdrv_filter_child(bs);
3428 backing = bdrv_cow_child(bs);
3431 * If the image has a backing file that is large enough that it would
3432 * provide data for the new area, we cannot leave it unallocated because
3433 * then the backing file content would become visible. Instead, zero-fill
3434 * the new area.
3436 * Note that if the image has a backing file, but was opened without the
3437 * backing file, taking care of keeping things consistent with that backing
3438 * file is the user's responsibility.
3440 if (new_bytes && backing) {
3441 int64_t backing_len;
3443 backing_len = bdrv_getlength(backing->bs);
3444 if (backing_len < 0) {
3445 ret = backing_len;
3446 error_setg_errno(errp, -ret, "Could not get backing file size");
3447 goto out;
3450 if (backing_len > old_size) {
3451 flags |= BDRV_REQ_ZERO_WRITE;
3455 if (drv->bdrv_co_truncate) {
3456 if (flags & ~bs->supported_truncate_flags) {
3457 error_setg(errp, "Block driver does not support requested flags");
3458 ret = -ENOTSUP;
3459 goto out;
3461 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
3462 } else if (filtered) {
3463 ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
3464 } else {
3465 error_setg(errp, "Image format driver does not support resize");
3466 ret = -ENOTSUP;
3467 goto out;
3469 if (ret < 0) {
3470 goto out;
3473 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3474 if (ret < 0) {
3475 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3476 } else {
3477 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3479 /* It's possible that truncation succeeded but refresh_total_sectors
3480 * failed, but the latter doesn't affect how we should finish the request.
3481 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3482 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3484 out:
3485 tracked_request_end(&req);
3486 bdrv_dec_in_flight(bs);
3488 return ret;
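/*
 * Example (illustrative sketch, not compiled): growing an image by 1 MiB
 * without requiring an exact final size. PREALLOC_MODE_OFF and the MiB
 * constant from "qemu/units.h" are existing definitions; the helper name
 * is hypothetical.
 *
 *   static int coroutine_fn example_grow(BdrvChild *child, Error **errp)
 *   {
 *       int64_t size = bdrv_getlength(child->bs);
 *
 *       if (size < 0) {
 *           error_setg_errno(errp, -size, "Failed to get image size");
 *           return size;
 *       }
 *       return bdrv_co_truncate(child, size + MiB, false,
 *                               PREALLOC_MODE_OFF, 0, errp);
 *   }
 */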
3491 void bdrv_cancel_in_flight(BlockDriverState *bs)
3493 if (!bs || !bs->drv) {
3494 return;
3497 if (bs->drv->bdrv_cancel_in_flight) {
3498 bs->drv->bdrv_cancel_in_flight(bs);