/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
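/*
 * For reference: with BDRV_SECTOR_BITS == 9 this evaluates to 32768 * 512
 * bytes, i.e. a 16 MiB upper bound on the bounce buffer.
 */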
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
        bdrv_parent_drained_begin_single(c, false);

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                    int *drained_end_counter)
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);

void bdrv_parent_drained_end_single(BdrvChild *c)
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
        busy |= bdrv_parent_drained_poll_single(c);

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
    BlockDriver *drv = bs->drv;

    BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
    *s = (BdrvRefreshLimitsState) {
    tran_add(tran, &bdrv_refresh_limits_drv, s);

    memset(&bs->bl, 0, sizeof(bs->bl));

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
            bdrv_refresh_limits(c->bs, tran, errp);
            bdrv_merge_limits(&bs->bl, &c->bs->bl);

    bs->bl.min_mem_alignment = 512;
    bs->bl.opt_mem_alignment = qemu_real_host_page_size;

    /* Safe default since most protocols use readv()/writev()/etc */
    bs->bl.max_iov = IOV_MAX;

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
    qatomic_inc(&bs->copy_on_read);

void bdrv_disable_copy_on_read(BlockDriverState *bs)
    int old = qatomic_fetch_dec(&bs->copy_on_read);
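/*
 * Usage sketch (illustrative only, not a caller in this file): because the
 * flag is a reference count, nested users simply pair the calls:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... perform reads that should populate the top layer ...
 *     bdrv_disable_copy_on_read(bs);
 */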
    BlockDriverState *bs;
    bool ignore_bds_parents;
    int *drained_end_counter;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

        bs->drv->bdrv_co_drain_begin(bs);
        bs->drv->bdrv_co_drain_end(bs);

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    qatomic_dec(data->drained_end_counter);

    bdrv_dec_in_flight(bs);

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
        (!begin && !bs->drv->bdrv_co_drain_end)) {

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .drained_end_counter = drained_end_counter,
    qatomic_inc(drained_end_counter);

    /* Make sure the driver callback completes during the polling phase for
     * drain to finish. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
    if (qatomic_read(&bs->in_flight)) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);
static void bdrv_co_drain_bh_cb(void *opaque)
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        aio_context_release(ctx);
        bdrv_drain_all_begin();
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                bool ignore_bds_parents,
                                                int *drained_end_counter)
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .recursive = recursive,
        .ignore_bds_parents = ignore_bds_parents,
        .drained_end_counter = drained_end_counter,

    bdrv_inc_in_flight(bs);

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     */
        aio_context_release(ctx);
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */

    /* Reacquire the AioContext of bs if we dropped it */
        aio_context_acquire(ctx);
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
void bdrv_drained_begin(BlockDriverState *bs)
    bdrv_do_drained_begin(bs, false, NULL, false, true);

void bdrv_subtree_drained_begin(BlockDriverState *bs)
    bdrv_do_drained_begin(bs, true, NULL, false, true);
/*
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));

        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
void bdrv_drained_end(BlockDriverState *bs)
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);

void bdrv_subtree_drained_end(BlockDriverState *bs)
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
    int drained_end_counter = 0;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);

void bdrv_drain(BlockDriverState *bs)
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
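/*
 * Usage sketch (illustrative): callers that need a quiescent node bracket
 * their critical section with the drained pair, e.g.
 *
 *     bdrv_drained_begin(bs);
 *     ... reconfigure the graph or do other work that must not race
 *         with in-flight I/O ...
 *     bdrv_drained_end(bs);
 */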
static void bdrv_drain_assert_idle(BlockDriverState *bs)
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
    BlockDriverState *bs = NULL;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     */
    if (replay_events_enabled()) {

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);

void bdrv_drain_all_end(void)
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     */
    if (replay_events_enabled()) {

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;

void bdrv_drain_all(void)
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);

/*
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  enum BdrvTrackedRequestType type)
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
    bdrv_check_request(offset, bytes, &error_abort);

    if (offset >= req->overlap_offset + req->overlap_bytes) {
    if (req->overlap_offset >= offset + bytes) {
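/*
 * For example, a tracked request with overlap_offset == 4096 and
 * overlap_bytes == 4096 covers [4096, 8192): a query starting at or beyond
 * 8192 matches the first check, one ending at or before 4096 matches the
 * second, and only ranges that intersect [4096, 8192) count as overlapping.
 */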
/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {

/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);

/*
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
/*
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
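/*
 * For example, with a 4096-byte cluster size, offset == 4608 and
 * bytes == 1024: QEMU_ALIGN_DOWN(4608, 4096) == 4096 and
 * QEMU_ALIGN_UP(4608 - 4096 + 1024, 4096) == 4096, so the rounded
 * region is [4096, 8192).
 */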
static int bdrv_get_cluster_size(BlockDriverState *bs)
    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
        return bdi.cluster_size;

void bdrv_inc_in_flight(BlockDriverState *bs)
    qatomic_inc(&bs->in_flight);

void bdrv_wakeup(BlockDriverState *bs)

void bdrv_dec_in_flight(BlockDriverState *bs)
    qatomic_dec(&bs->in_flight);

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    waited = bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);
static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                                   QEMUIOVector *qiov, size_t qiov_offset,
    /*
     * Check generic offset/bytes correctness
     */
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,

    /*
     * Check qiov and qiov_offset
     */
    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags)
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {

        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);

        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret & BDRV_BLOCK_ZERO) {
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes)
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;

/* Return no. of bytes on success or < 0 on error. Important errors are:
   -EIO         generic I/O error (may happen for all errors)
   -ENOMEDIUM   No media inserted.
   -EINVAL      Invalid offset or number of bytes
   -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t count)
    ret = bdrv_pwrite(child, offset, buf, count);

    ret = bdrv_flush(child->bs);
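/*
 * Usage sketch (illustrative): these synchronous helpers are convenient for
 * small metadata updates, e.g.
 *
 *     uint8_t header[512];
 *     if (bdrv_pread(child, 0, header, sizeof(header)) < 0) { ... }
 *     ... modify header ...
 *     if (bdrv_pwrite_sync(child, 0, header, sizeof(header)) < 0) { ... }
 */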
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
    CoroutineIOCompletion *co = opaque;

    aio_co_wake(co->coroutine);
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           int64_t offset, int64_t bytes,
                                           size_t qiov_offset, int flags)
    BlockDriver *drv = bs->drv;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);

    if (drv->bdrv_aio_preadv) {
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
            qemu_coroutine_yield();

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            size_t qiov_offset, int flags)
    BlockDriver *drv = bs->drv;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;

    if (drv->bdrv_aio_pwritev) {
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
            qemu_coroutine_yield();

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!block_driver_can_compress(drv)) {

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);

        /* Stop at EOF if the image ends in the middle of the cluster */
        if (ret == 0 && pnum == 0) {
            assert(progress >= bytes);

        assert(skip_bytes < pnum);

            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          BDRV_REQ_WRITE_UNCHANGED);

                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;

    qemu_vfree(bounce_buffer);
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int64_t bytes_remaining = bytes;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
        bdrv_wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
        } else if (flags & BDRV_REQ_PREFETCH) {

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {

    assert(!(flags & ~bs->supported_read_flags));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);

    while (bytes_remaining) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     qiov_offset + bytes - bytes_remaining,
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        bytes_remaining -= num;

    return ret < 0 ? ret : 0;
/*
 * |<---- align ----->|                     |<----- align ---->|
 * |<- head ->|<------------- bytes ------------->|<-- tail -->|
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |   offset |                             | end |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf               ... )                 [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    QEMUIOVector local_qiov;
} BdrvRequestPadding;
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
    int64_t align = bs->bl.request_alignment;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
        pad->tail = align - pad->tail;

    if (!pad->head && !pad->tail) {

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;

        pad->tail_buf = pad->buf + pad->buf_len - align;
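/*
 * For example, with request_alignment == 512, offset == 1000 and
 * bytes == 2000: head == 1000 & 511 == 488, (1000 + 2000) & 511 == 440 so
 * tail == 512 - 440 == 72, and sum == 488 + 2000 + 72 == 2560; sum exceeds
 * align with both head and tail present, so buf_len == 1024 and merge_reads
 * is false (2560 != 1024).
 */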
static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (pad->merge_reads) {

        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    memset(pad, 0, sizeof(*pad));
/*
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 *
 * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
 *  - on function start they represent original request
 *  - on failure or when padding is not needed they are unchanged
 *  - on success when padding is needed they represent padded request
 */
static int bdrv_pad_request(BlockDriverState *bs,
                            QEMUIOVector **qiov, size_t *qiov_offset,
                            int64_t *offset, int64_t *bytes,
                            BdrvRequestPadding *pad, bool *padded)
    bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);

    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {

    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                                   *qiov, *qiov_offset, *bytes,
                                   pad->buf + pad->buf_len - pad->tail,
        bdrv_padding_destroy(pad);

    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
                                int64_t offset, int64_t bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
                                     int64_t offset, int64_t bytes,
                                     QEMUIOVector *qiov, size_t qiov_offset,
                                     BdrvRequestFlags flags)
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;

    trace_bdrv_co_preadv_part(bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver
         * assigns special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass it to the driver
         * due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (qatomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
        int64_t offset, int64_t bytes, BdrvRequestFlags flags)
    BlockDriver *drv = bs->drv;
    bool need_flush = false;
    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    bdrv_check_request(offset, bytes, &error_abort);

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int64_t num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries. */
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;

        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
            assert(!bs->supported_zero_flags);

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;

            num = MIN(num, max_transfer);
                buf = qemu_try_blockalign0(bs, num);
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests. */
            if (num < max_transfer) {

    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
                          BdrvTrackedRequest *req, int flags)
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    if (bs->read_only) {

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));

    if (flags & BDRV_REQ_SERIALISING) {
        QEMU_LOCK_GUARD(&bs->reqs_lock);

        tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));

        if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {

        bdrv_wait_serialising_requests_locked(req);
        bdrv_wait_serialising_requests(req);

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
           child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
            assert(child->perm & BLK_PERM_WRITE);
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
                         BdrvTrackedRequest *req, int ret)
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    bdrv_check_request(offset, bytes, &error_abort);

    qatomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
        stat64_max(&bs->wr_highest_offset, offset + bytes);
        /* fall through, to set dirty bits */
    case BDRV_TRACKED_DISCARD:
        bdrv_set_dirty(bs, offset, bytes);
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int64_t bytes_remaining = bytes;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (bdrv_has_readonly_bitmaps(bs)) {

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;

        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      qiov_offset + bytes - bytes_remaining,
            bytes_remaining -= num;
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    bdrv_co_write_req_finish(child, offset, bytes, req, ret);
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;

    padding = bdrv_init_padding(bs, offset, bytes, &pad);
        bdrv_make_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;

            qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
            ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
                                       align, &local_qiov, 0,
                                       flags & ~BDRV_REQ_ZERO_WRITE);
            if (ret < 0 || pad.merge_reads) {
                /* Error or all work is done */

            offset += write_bytes - pad.head;
            bytes -= write_bytes - pad.head;

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        int64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,

        bytes -= aligned_bytes;
        offset += aligned_bytes;

    assert(!bytes || (offset & (align - 1)) == 0);
        assert(align == pad.tail + bytes);

        qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   flags & ~BDRV_REQ_ZERO_WRITE);

    bdrv_padding_destroy(&pad);
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
                                 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
                                 BdrvRequestFlags flags)
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    bool padded = false;

    trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);

    if (!bdrv_is_inserted(bs)) {

    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if the driver
         * assigns special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we can't pass it to the driver
         * due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length write occasionally.
         */

    if (!(flags & BDRV_REQ_ZERO_WRITE)) {
        /*
         * Pad request for following read-modify-write cycle.
         * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
         * alignment only if there is no ZERO flag.
         */
        ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);

        /*
         * Request was unaligned to request_alignment and therefore
         * padded. We are going to do read-modify-write, and must
         * serialize the request to prevent interactions of the
         * widened region with other transactions.
         */
        bdrv_make_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
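
/*
 * Illustrative sketch (not part of the original file): from coroutine context
 * a caller can zero a region and allow the driver to unmap it where possible.
 * 'child', 'offset' and 'bytes' are placeholders for the caller's own values.
 *
 *     ret = bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
 *
 * The BDRV_REQ_MAY_UNMAP hint is dropped above when the node was opened
 * without BDRV_O_UNMAP, so the data still reads back as zeroes either way.
 */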
/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    /*
     * The bdrv queue is managed by record/replay; creating a new flush
     * request while stopping the VM may break determinism.
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;
    bool has_filtered_child;

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    has_filtered_child = bdrv_filter_child(bs);
    if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    if (bs->drv->bdrv_co_block_status) {
        ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                            aligned_bytes, pnum, &local_map,
                                            &local_file);
    } else {
        /* Default code for filters */

        local_file = bdrv_filter_bs(bs);
        assert(local_file);

        *pnum = aligned_bytes;
        local_map = aligned_offset;
        ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
    }
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to the original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (bs->drv->supports_backing) {
        BlockDriverState *cow_bs = bdrv_cow_bs(bs);

        if (!cow_bs) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (want_zero) {
            int64_t size2 = bdrv_getlength(cow_bs);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /*
             * Ignore errors: this only provides extra information; it is
             * useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
static int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
                                  BlockDriverState *base,
                                  bool include_base,
                                  bool want_zero,
                                  int64_t offset,
                                  int64_t bytes,
                                  int64_t *pnum,
                                  int64_t *map,
                                  BlockDriverState **file,
                                  int *depth)
{
    int ret;
    BlockDriverState *p;
    int64_t eof = 0;
    int dummy;

    assert(!include_base || base); /* Can't include NULL base */

    if (!depth) {
        depth = &dummy;
    }
    *depth = 0;

    if (!include_base && bs == base) {
        *pnum = bytes;
        return 0;
    }

    ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
    ++*depth;
    if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
        return ret;
    }

    if (ret & BDRV_BLOCK_EOF) {
        eof = offset + *pnum;
    }

    assert(*pnum <= bytes);
    bytes = *pnum;

    for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
         p = bdrv_filter_or_cow_bs(p))
    {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        ++*depth;
        if (ret < 0) {
            return ret;
        }
        if (*pnum == 0) {
            /*
             * The top layer deferred to this layer, and because this layer is
             * short, any zeroes that we synthesize beyond EOF behave as if
             * they were allocated at this layer.
             *
             * We don't include BDRV_BLOCK_EOF into ret, as the upper layer
             * may be larger.  We'll add BDRV_BLOCK_EOF if needed at function
             * end, see below.
             */
            assert(ret & BDRV_BLOCK_EOF);
            *pnum = bytes;
            if (file) {
                *file = p;
            }
            ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
            break;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            /*
             * We've found the node and the status, we must break.
             *
             * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which may
             * be larger.  We'll add BDRV_BLOCK_EOF if needed at function end,
             * see below.
             */
            ret &= ~BDRV_BLOCK_EOF;
            break;
        }

        if (p == base) {
            assert(include_base);
            break;
        }

        /*
         * OK, [offset, offset + *pnum) region is unallocated on this layer,
         * let's continue the diving.
         */
        assert(*pnum <= bytes);
        bytes = *pnum;
    }

    if (offset + *pnum == eof) {
        ret |= BDRV_BLOCK_EOF;
    }

    return ret;
}
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
                                          pnum, map, file, NULL);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
                                   offset, bytes, pnum, map, file);
}
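
/*
 * Illustrative sketch (not part of the original file): a caller can walk the
 * allocation map of a node by advancing 'offset' by the returned 'pnum', as
 * described in the comment above bdrv_co_block_status().  Error handling is
 * minimal and 'bs' is a placeholder for the caller's node.
 *
 *     int64_t offset = 0, length = bdrv_getlength(bs);
 *     while (offset < length) {
 *         int64_t pnum, map;
 *         BlockDriverState *file;
 *         int ret = bdrv_block_status(bs, offset, length - offset,
 *                                     &pnum, &map, &file);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         // ret carries BDRV_BLOCK_* flags for [offset, offset + pnum)
 *         offset += pnum;
 *     }
 */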
/*
 * Check @bs (and its backing chain) to see if the range defined
 * by @offset and @bytes is known to read as zeroes.
 * Return 1 if that is the case, 0 otherwise and -errno on error.
 * This test is meant to be fast rather than accurate so returning 0
 * does not guarantee non-zero data.
 */
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes)
{
    int ret;
    int64_t pnum = bytes;

    if (!bytes) {
        return 1;
    }

    ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
                                         bytes, &pnum, NULL, NULL, NULL);
    if (ret < 0) {
        return ret;
    }

    return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
}
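
/*
 * Illustrative sketch (not part of the original file): callers typically use
 * this as an optimisation hint, e.g. to skip work for a region that already
 * reads as zeroes:
 *
 *     ret = bdrv_co_is_zero_fast(bs, offset, bytes);
 *     if (ret > 0) {
 *         // whole range is known to read as zeroes, the copy can be skipped
 *     } else if (ret == 0) {
 *         // unknown or non-zero data, fall back to reading the range
 *     }
 */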
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL, NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return a positive depth if (a prefix of) the given range is allocated
 * in any image between BASE and TOP (BASE is only included if include_base
 * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    int depth;
    int ret = bdrv_common_block_status_above(top, base, include_base, false,
                                             offset, bytes, pnum, NULL, NULL,
                                             &depth);
    if (ret < 0) {
        return ret;
    }

    if (ret & BDRV_BLOCK_ALLOCATED) {
        return depth;
    }
    return 0;
}
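
/*
 * Illustrative sketch (not part of the original file): querying how deep in
 * the backing chain a range is allocated.  Depth 1 means the top image itself
 * provides the data; 'top', 'offset' and 'bytes' are placeholders.
 *
 *     int64_t pnum;
 *     int depth = bdrv_is_allocated_above(top, NULL, false, offset, bytes,
 *                                         &pnum);
 *     if (depth > 0) {
 *         // the first 'pnum' bytes are allocated 'depth' layers down from top
 *     }
 */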
int coroutine_fn
bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret = -ENOTSUP;

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_load_vmstate) {
        ret = drv->bdrv_load_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
    }

    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;
    BlockDriverState *child_bs = bdrv_primary_bs(bs);
    int ret = -ENOTSUP;

    if (!drv) {
        return -ENOMEDIUM;
    }

    bdrv_inc_in_flight(bs);

    if (drv->bdrv_save_vmstate) {
        ret = drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (child_bs) {
        ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
    }

    bdrv_dec_in_flight(bs);

    return ret;
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_writev_vmstate(bs, &qiov, pos);

    return ret < 0 ? ret : size;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret = bdrv_readv_vmstate(bs, &qiov, pos);

    return ret < 0 ? ret : size;
}
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel.  The caller is not blocked if the acb
 * implements cancel_async; otherwise we do nothing and let the request
 * complete normally.  In either case the completion callback must be called.
 */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    BdrvChild *primary_child = bdrv_primary_child(bs);
    BdrvChild *child;
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = qatomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_children;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_children;
    }

    BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore.  Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk.  Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_children:
    ret = 0;
    QLIST_FOREACH(child, &bs->children, next) {
        if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
            int this_child_ret = bdrv_co_flush(child->bs);
            if (!ret) {
                ret = this_child_ret;
            }
        }
    }

out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;
    BlockDriverState *bs = child->bs;

    if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
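
/*
 * Illustrative sketch (not part of the original file): bounce buffers for
 * direct I/O should be allocated with the node's memory alignment and freed
 * with qemu_vfree().  'bs' and 'len' are placeholders.
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ... use buf for aligned reads/writes ...
 *     qemu_vfree(buf);
 */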
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_register_buf) {
        bs->drv->bdrv_register_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_register_buf(child->bs, host, size);
    }
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host);
    }
}
static int coroutine_fn bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));

    if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}
/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
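
/*
 * Illustrative sketch (not part of the original file): an offloaded copy
 * between two children from coroutine context.  It fails with -ENOTSUP when
 * either driver lacks copy_range support, in which case the caller falls back
 * to an ordinary bdrv_co_preadv()/bdrv_co_pwritev() copy; 'src', 'dst',
 * offsets and 'bytes' are placeholders.
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset, bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         // fall back to reading into a bounce buffer and writing it out
 *     }
 */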
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    if (bs->read_only) {
        error_setg(errp, "Image is read-only");
        ret = -EACCES;
        goto out;
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /* It's possible that truncation succeeded but refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
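
    return ret;
}

/*
 * Illustrative sketch (not part of the original file): growing a node to
 * exactly 'new_size' bytes without preallocation, reporting errors via errp.
 * 'child', 'new_size' and 'errp' are placeholders.
 *
 *     ret = bdrv_co_truncate(child, new_size, true, PREALLOC_MODE_OFF, 0,
 *                            errp);
 *     if (ret < 0) {
 *         // node keeps (at least) its old length and errp is set
 *     }
 */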
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs