/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->role->drained_end) {
        c->role->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->role->drained_poll) {
        return c->role->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->role->drained_begin) {
        c->role->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    atomic_mb_set(&data->done, true);
    if (!data->begin) {
        atomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        atomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/*
 * This function does not poll, nor must any of its recursively called
 * functions.  The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles.  Therefore, the pointer must remain valid
 * until the pointee reaches 0.  That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(atomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

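/*
 * Illustrative sketch (not part of the original file, therefore compiled
 * out): how a caller is expected to pair the drained-section API above.
 * The surrounding function name is hypothetical; bdrv_drained_begin() and
 * bdrv_drained_end() are the real entry points defined in this file.
 */
#if 0
static void example_reconfigure_node(BlockDriverState *bs)
{
    /* Quiesce parents and the driver, then poll in-flight requests to zero */
    bdrv_drained_begin(bs);

    /* ... graph or option changes that must not race with guest I/O ... */

    /* Re-enable I/O; this polls until background drained_end ops settle */
    bdrv_drained_end(bs);
}
#endif
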
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/*
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

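/*
 * Illustrative sketch (not part of the original file, therefore compiled
 * out): the pairing that request entry points in this file follow.  The
 * function below is hypothetical; tracked_request_begin()/end() and the
 * in-flight counter helpers are the real APIs used by bdrv_co_pwritev_part().
 */
#if 0
static int coroutine_fn example_tracked_write(BdrvChild *child,
                                              int64_t offset, uint64_t bytes)
{
    BdrvTrackedRequest req;
    int ret = 0;

    bdrv_inc_in_flight(child->bs);
    tracked_request_begin(&req, child->bs, offset, bytes, BDRV_TRACKED_WRITE);

    /* ... perform the aligned write, as bdrv_aligned_pwritev() does ... */

    tracked_request_end(&req);
    bdrv_dec_in_flight(child->bs);
    return ret;
}
#endif
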
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
{
    /*
     * If the request is serialising, overlap_offset and overlap_bytes are set,
     * so we can check if the request is aligned. Otherwise, don't care and
     * return false.
     */

    return req->serialising && (req->offset == req->overlap_offset) &&
           (req->bytes == req->overlap_bytes);
}

/*
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

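/*
 * Worked example (not part of the original file), assuming bdrv_get_info()
 * reports a 64 KiB cluster size: a 4 KiB request at offset 130 KiB is widened
 * to the containing cluster range.
 *
 *   offset          = 133120  (130 KiB)
 *   bytes           = 4096
 *   *cluster_offset = 131072  (QEMU_ALIGN_DOWN(133120, 65536))
 *   *cluster_bytes  = 65536   (QEMU_ALIGN_UP(133120 - 131072 + 4096, 65536))
 */
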
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}

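/*
 * Illustrative sketch (not part of the original file, therefore compiled
 * out): how the write paths below combine the two helpers above before a
 * read-modify-write of padding.  The function is hypothetical;
 * mark_request_serialising() and wait_serialising_requests() are the real
 * helpers.
 */
#if 0
static void coroutine_fn example_serialise_rmw(BdrvTrackedRequest *req,
                                               uint64_t align)
{
    /* Widen the overlap window to the alignment granule... */
    mark_request_serialising(req, align);
    /* ...and block until overlapping tracked requests have finished. */
    wait_serialising_requests(req);
}
#endif
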
835 static int bdrv_check_byte_request(BlockDriverState
*bs
, int64_t offset
,
838 if (size
> BDRV_REQUEST_MAX_BYTES
) {
842 if (!bdrv_is_inserted(bs
)) {
853 typedef struct RwCo
{
859 BdrvRequestFlags flags
;
862 static void coroutine_fn
bdrv_rw_co_entry(void *opaque
)
866 if (!rwco
->is_write
) {
867 rwco
->ret
= bdrv_co_preadv(rwco
->child
, rwco
->offset
,
868 rwco
->qiov
->size
, rwco
->qiov
,
871 rwco
->ret
= bdrv_co_pwritev(rwco
->child
, rwco
->offset
,
872 rwco
->qiov
->size
, rwco
->qiov
,
879 * Process a vectored synchronous request using coroutines
881 static int bdrv_prwv_co(BdrvChild
*child
, int64_t offset
,
882 QEMUIOVector
*qiov
, bool is_write
,
883 BdrvRequestFlags flags
)
890 .is_write
= is_write
,
895 if (qemu_in_coroutine()) {
896 /* Fast-path if already in coroutine context */
897 bdrv_rw_co_entry(&rwco
);
899 co
= qemu_coroutine_create(bdrv_rw_co_entry
, &rwco
);
900 bdrv_coroutine_enter(child
->bs
, co
);
901 BDRV_POLL_WHILE(child
->bs
, rwco
.ret
== NOT_DONE
);
906 int bdrv_pwrite_zeroes(BdrvChild
*child
, int64_t offset
,
907 int bytes
, BdrvRequestFlags flags
)
909 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, NULL
, bytes
);
911 return bdrv_prwv_co(child
, offset
, &qiov
, true,
912 BDRV_REQ_ZERO_WRITE
| flags
);
916 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
917 * The operation is sped up by checking the block status and only writing
918 * zeroes to the device if they currently do not return zeroes. Optional
919 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
922 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
924 int bdrv_make_zero(BdrvChild
*child
, BdrvRequestFlags flags
)
927 int64_t target_size
, bytes
, offset
= 0;
928 BlockDriverState
*bs
= child
->bs
;
930 target_size
= bdrv_getlength(bs
);
931 if (target_size
< 0) {
936 bytes
= MIN(target_size
- offset
, BDRV_REQUEST_MAX_BYTES
);
940 ret
= bdrv_block_status(bs
, offset
, bytes
, &bytes
, NULL
, NULL
);
944 if (ret
& BDRV_BLOCK_ZERO
) {
948 ret
= bdrv_pwrite_zeroes(child
, offset
, bytes
, flags
);
956 int bdrv_preadv(BdrvChild
*child
, int64_t offset
, QEMUIOVector
*qiov
)
960 ret
= bdrv_prwv_co(child
, offset
, qiov
, false, 0);
968 /* See bdrv_pwrite() for the return codes */
969 int bdrv_pread(BdrvChild
*child
, int64_t offset
, void *buf
, int bytes
)
971 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, bytes
);
977 return bdrv_preadv(child
, offset
, &qiov
);
980 int bdrv_pwritev(BdrvChild
*child
, int64_t offset
, QEMUIOVector
*qiov
)
984 ret
= bdrv_prwv_co(child
, offset
, qiov
, true, 0);
992 /* Return no. of bytes on success or < 0 on error. Important errors are:
993 -EIO generic I/O error (may happen for all errors)
994 -ENOMEDIUM No media inserted.
995 -EINVAL Invalid offset or number of bytes
996 -EACCES Trying to write a read-only device
998 int bdrv_pwrite(BdrvChild
*child
, int64_t offset
, const void *buf
, int bytes
)
1000 QEMUIOVector qiov
= QEMU_IOVEC_INIT_BUF(qiov
, buf
, bytes
);
1006 return bdrv_pwritev(child
, offset
, &qiov
);
1010 * Writes to the file and ensures that no writes are reordered across this
1011 * request (acts as a barrier)
1013 * Returns 0 on success, -errno in error cases.
1015 int bdrv_pwrite_sync(BdrvChild
*child
, int64_t offset
,
1016 const void *buf
, int count
)
1020 ret
= bdrv_pwrite(child
, offset
, buf
, count
);
1025 ret
= bdrv_flush(child
->bs
);
1033 typedef struct CoroutineIOCompletion
{
1034 Coroutine
*coroutine
;
1036 } CoroutineIOCompletion
;
1038 static void bdrv_co_io_em_complete(void *opaque
, int ret
)
1040 CoroutineIOCompletion
*co
= opaque
;
1043 aio_co_wake(co
->coroutine
);
1046 static int coroutine_fn
bdrv_driver_preadv(BlockDriverState
*bs
,
1047 uint64_t offset
, uint64_t bytes
,
1049 size_t qiov_offset
, int flags
)
1051 BlockDriver
*drv
= bs
->drv
;
1053 unsigned int nb_sectors
;
1054 QEMUIOVector local_qiov
;
1057 assert(!(flags
& ~BDRV_REQ_MASK
));
1058 assert(!(flags
& BDRV_REQ_NO_FALLBACK
));
1064 if (drv
->bdrv_co_preadv_part
) {
1065 return drv
->bdrv_co_preadv_part(bs
, offset
, bytes
, qiov
, qiov_offset
,
1069 if (qiov_offset
> 0 || bytes
!= qiov
->size
) {
1070 qemu_iovec_init_slice(&local_qiov
, qiov
, qiov_offset
, bytes
);
1074 if (drv
->bdrv_co_preadv
) {
1075 ret
= drv
->bdrv_co_preadv(bs
, offset
, bytes
, qiov
, flags
);
1079 if (drv
->bdrv_aio_preadv
) {
1081 CoroutineIOCompletion co
= {
1082 .coroutine
= qemu_coroutine_self(),
1085 acb
= drv
->bdrv_aio_preadv(bs
, offset
, bytes
, qiov
, flags
,
1086 bdrv_co_io_em_complete
, &co
);
1091 qemu_coroutine_yield();
1097 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1098 nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
1100 assert(QEMU_IS_ALIGNED(offset
, BDRV_SECTOR_SIZE
));
1101 assert(QEMU_IS_ALIGNED(bytes
, BDRV_SECTOR_SIZE
));
1102 assert(bytes
<= BDRV_REQUEST_MAX_BYTES
);
1103 assert(drv
->bdrv_co_readv
);
1105 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
1108 if (qiov
== &local_qiov
) {
1109 qemu_iovec_destroy(&local_qiov
);
1115 static int coroutine_fn
bdrv_driver_pwritev(BlockDriverState
*bs
,
1116 uint64_t offset
, uint64_t bytes
,
1118 size_t qiov_offset
, int flags
)
1120 BlockDriver
*drv
= bs
->drv
;
1122 unsigned int nb_sectors
;
1123 QEMUIOVector local_qiov
;
1126 assert(!(flags
& ~BDRV_REQ_MASK
));
1127 assert(!(flags
& BDRV_REQ_NO_FALLBACK
));
1133 if (drv
->bdrv_co_pwritev_part
) {
1134 ret
= drv
->bdrv_co_pwritev_part(bs
, offset
, bytes
, qiov
, qiov_offset
,
1135 flags
& bs
->supported_write_flags
);
1136 flags
&= ~bs
->supported_write_flags
;
1140 if (qiov_offset
> 0 || bytes
!= qiov
->size
) {
1141 qemu_iovec_init_slice(&local_qiov
, qiov
, qiov_offset
, bytes
);
1145 if (drv
->bdrv_co_pwritev
) {
1146 ret
= drv
->bdrv_co_pwritev(bs
, offset
, bytes
, qiov
,
1147 flags
& bs
->supported_write_flags
);
1148 flags
&= ~bs
->supported_write_flags
;
1152 if (drv
->bdrv_aio_pwritev
) {
1154 CoroutineIOCompletion co
= {
1155 .coroutine
= qemu_coroutine_self(),
1158 acb
= drv
->bdrv_aio_pwritev(bs
, offset
, bytes
, qiov
,
1159 flags
& bs
->supported_write_flags
,
1160 bdrv_co_io_em_complete
, &co
);
1161 flags
&= ~bs
->supported_write_flags
;
1165 qemu_coroutine_yield();
1171 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1172 nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
1174 assert(QEMU_IS_ALIGNED(offset
, BDRV_SECTOR_SIZE
));
1175 assert(QEMU_IS_ALIGNED(bytes
, BDRV_SECTOR_SIZE
));
1176 assert(bytes
<= BDRV_REQUEST_MAX_BYTES
);
1178 assert(drv
->bdrv_co_writev
);
1179 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, qiov
,
1180 flags
& bs
->supported_write_flags
);
1181 flags
&= ~bs
->supported_write_flags
;
1184 if (ret
== 0 && (flags
& BDRV_REQ_FUA
)) {
1185 ret
= bdrv_co_flush(bs
);
1188 if (qiov
== &local_qiov
) {
1189 qemu_iovec_destroy(&local_qiov
);
1195 static int coroutine_fn
1196 bdrv_driver_pwritev_compressed(BlockDriverState
*bs
, uint64_t offset
,
1197 uint64_t bytes
, QEMUIOVector
*qiov
,
1200 BlockDriver
*drv
= bs
->drv
;
1201 QEMUIOVector local_qiov
;
1208 if (!block_driver_can_compress(drv
)) {
1212 if (drv
->bdrv_co_pwritev_compressed_part
) {
1213 return drv
->bdrv_co_pwritev_compressed_part(bs
, offset
, bytes
,
1217 if (qiov_offset
== 0) {
1218 return drv
->bdrv_co_pwritev_compressed(bs
, offset
, bytes
, qiov
);
1221 qemu_iovec_init_slice(&local_qiov
, qiov
, qiov_offset
, bytes
);
1222 ret
= drv
->bdrv_co_pwritev_compressed(bs
, offset
, bytes
, &local_qiov
);
1223 qemu_iovec_destroy(&local_qiov
);
1228 static int coroutine_fn
bdrv_co_do_copy_on_readv(BdrvChild
*child
,
1229 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
1230 size_t qiov_offset
, int flags
)
1232 BlockDriverState
*bs
= child
->bs
;
1234 /* Perform I/O through a temporary buffer so that users who scribble over
1235 * their read buffer while the operation is in progress do not end up
1236 * modifying the image file. This is critical for zero-copy guest I/O
1237 * where anything might happen inside guest memory.
1239 void *bounce_buffer
= NULL
;
1241 BlockDriver
*drv
= bs
->drv
;
1242 int64_t cluster_offset
;
1243 int64_t cluster_bytes
;
1246 int max_transfer
= MIN_NON_ZERO(bs
->bl
.max_transfer
,
1247 BDRV_REQUEST_MAX_BYTES
);
1248 unsigned int progress
= 0;
1256 * Do not write anything when the BDS is inactive. That is not
1257 * allowed, and it would not help.
1259 skip_write
= (bs
->open_flags
& BDRV_O_INACTIVE
);
1261 /* FIXME We cannot require callers to have write permissions when all they
1262 * are doing is a read request. If we did things right, write permissions
1263 * would be obtained anyway, but internally by the copy-on-read code. As
1264 * long as it is implemented here rather than in a separate filter driver,
1265 * the copy-on-read code doesn't have its own BdrvChild, however, for which
1266 * it could request permissions. Therefore we have to bypass the permission
1267 * system for the moment. */
1268 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1270 /* Cover entire cluster so no additional backing file I/O is required when
1271 * allocating cluster in the image file. Note that this value may exceed
1272 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1273 * is one reason we loop rather than doing it all at once.
1275 bdrv_round_to_clusters(bs
, offset
, bytes
, &cluster_offset
, &cluster_bytes
);
1276 skip_bytes
= offset
- cluster_offset
;
1278 trace_bdrv_co_do_copy_on_readv(bs
, offset
, bytes
,
1279 cluster_offset
, cluster_bytes
);
1281 while (cluster_bytes
) {
1285 ret
= 1; /* "already allocated", so nothing will be copied */
1286 pnum
= MIN(cluster_bytes
, max_transfer
);
1288 ret
= bdrv_is_allocated(bs
, cluster_offset
,
1289 MIN(cluster_bytes
, max_transfer
), &pnum
);
1292 * Safe to treat errors in querying allocation as if
1293 * unallocated; we'll probably fail again soon on the
1294 * read, but at least that will set a decent errno.
1296 pnum
= MIN(cluster_bytes
, max_transfer
);
1299 /* Stop at EOF if the image ends in the middle of the cluster */
1300 if (ret
== 0 && pnum
== 0) {
1301 assert(progress
>= bytes
);
1305 assert(skip_bytes
< pnum
);
1309 QEMUIOVector local_qiov
;
1311 /* Must copy-on-read; use the bounce buffer */
1312 pnum
= MIN(pnum
, MAX_BOUNCE_BUFFER
);
1313 if (!bounce_buffer
) {
1314 int64_t max_we_need
= MAX(pnum
, cluster_bytes
- pnum
);
1315 int64_t max_allowed
= MIN(max_transfer
, MAX_BOUNCE_BUFFER
);
1316 int64_t bounce_buffer_len
= MIN(max_we_need
, max_allowed
);
1318 bounce_buffer
= qemu_try_blockalign(bs
, bounce_buffer_len
);
1319 if (!bounce_buffer
) {
1324 qemu_iovec_init_buf(&local_qiov
, bounce_buffer
, pnum
);
1326 ret
= bdrv_driver_preadv(bs
, cluster_offset
, pnum
,
1332 bdrv_debug_event(bs
, BLKDBG_COR_WRITE
);
1333 if (drv
->bdrv_co_pwrite_zeroes
&&
1334 buffer_is_zero(bounce_buffer
, pnum
)) {
1335 /* FIXME: Should we (perhaps conditionally) be setting
1336 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1337 * that still correctly reads as zero? */
1338 ret
= bdrv_co_do_pwrite_zeroes(bs
, cluster_offset
, pnum
,
1339 BDRV_REQ_WRITE_UNCHANGED
);
1341 /* This does not change the data on the disk, it is not
1342 * necessary to flush even in cache=writethrough mode.
1344 ret
= bdrv_driver_pwritev(bs
, cluster_offset
, pnum
,
1346 BDRV_REQ_WRITE_UNCHANGED
);
1350 /* It might be okay to ignore write errors for guest
1351 * requests. If this is a deliberate copy-on-read
1352 * then we don't want to ignore the error. Simply
1353 * report it in all cases.
1358 if (!(flags
& BDRV_REQ_PREFETCH
)) {
1359 qemu_iovec_from_buf(qiov
, qiov_offset
+ progress
,
1360 bounce_buffer
+ skip_bytes
,
1363 } else if (!(flags
& BDRV_REQ_PREFETCH
)) {
1364 /* Read directly into the destination */
1365 ret
= bdrv_driver_preadv(bs
, offset
+ progress
,
1366 MIN(pnum
- skip_bytes
, bytes
- progress
),
1367 qiov
, qiov_offset
+ progress
, 0);
1373 cluster_offset
+= pnum
;
1374 cluster_bytes
-= pnum
;
1375 progress
+= pnum
- skip_bytes
;
1381 qemu_vfree(bounce_buffer
);
1386 * Forwards an already correctly aligned request to the BlockDriver. This
1387 * handles copy on read, zeroing after EOF, and fragmentation of large
1388 * reads; any other features must be implemented by the caller.
1390 static int coroutine_fn
bdrv_aligned_preadv(BdrvChild
*child
,
1391 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
1392 int64_t align
, QEMUIOVector
*qiov
, size_t qiov_offset
, int flags
)
1394 BlockDriverState
*bs
= child
->bs
;
1395 int64_t total_bytes
, max_bytes
;
1397 uint64_t bytes_remaining
= bytes
;
1400 assert(is_power_of_2(align
));
1401 assert((offset
& (align
- 1)) == 0);
1402 assert((bytes
& (align
- 1)) == 0);
1403 assert((bs
->open_flags
& BDRV_O_NO_IO
) == 0);
1404 max_transfer
= QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs
->bl
.max_transfer
, INT_MAX
),
1407 /* TODO: We would need a per-BDS .supported_read_flags and
1408 * potential fallback support, if we ever implement any read flags
1409 * to pass through to drivers. For now, there aren't any
1410 * passthrough flags. */
1411 assert(!(flags
& ~(BDRV_REQ_NO_SERIALISING
| BDRV_REQ_COPY_ON_READ
|
1412 BDRV_REQ_PREFETCH
)));
1414 /* Handle Copy on Read and associated serialisation */
1415 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1416 /* If we touch the same cluster it counts as an overlap. This
1417 * guarantees that allocating writes will be serialized and not race
1418 * with each other for the same cluster. For example, in copy-on-read
1419 * it ensures that the CoR read and write operations are atomic and
1420 * guest writes cannot interleave between them. */
1421 mark_request_serialising(req
, bdrv_get_cluster_size(bs
));
1424 /* BDRV_REQ_SERIALISING is only for write operation */
1425 assert(!(flags
& BDRV_REQ_SERIALISING
));
1427 if (!(flags
& BDRV_REQ_NO_SERIALISING
)) {
1428 wait_serialising_requests(req
);
1431 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1434 ret
= bdrv_is_allocated(bs
, offset
, bytes
, &pnum
);
1439 if (!ret
|| pnum
!= bytes
) {
1440 ret
= bdrv_co_do_copy_on_readv(child
, offset
, bytes
,
1441 qiov
, qiov_offset
, flags
);
1443 } else if (flags
& BDRV_REQ_PREFETCH
) {
1448 /* Forward the request to the BlockDriver, possibly fragmenting it */
1449 total_bytes
= bdrv_getlength(bs
);
1450 if (total_bytes
< 0) {
1455 max_bytes
= ROUND_UP(MAX(0, total_bytes
- offset
), align
);
1456 if (bytes
<= max_bytes
&& bytes
<= max_transfer
) {
1457 ret
= bdrv_driver_preadv(bs
, offset
, bytes
, qiov
, qiov_offset
, 0);
1461 while (bytes_remaining
) {
1465 num
= MIN(bytes_remaining
, MIN(max_bytes
, max_transfer
));
1468 ret
= bdrv_driver_preadv(bs
, offset
+ bytes
- bytes_remaining
,
1469 num
, qiov
, bytes
- bytes_remaining
, 0);
1472 num
= bytes_remaining
;
1473 ret
= qemu_iovec_memset(qiov
, bytes
- bytes_remaining
, 0,
1479 bytes_remaining
-= num
;
1483 return ret
< 0 ? ret
: 0;
1489 * |<---- align ----->| |<----- align ---->|
1490 * |<- head ->|<------------- bytes ------------->|<-- tail -->|
1492 * -*----------$-------*-------- ... --------*-----$------------*---
1494 * | offset | | end |
1495 * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end)
1496 * [buf ... ) [tail_buf )
1498 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
1499 * is placed at the beginning of @buf and @tail at the @end.
1501 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
1502 * around tail, if tail exists.
1504 * @merge_reads is true for small requests,
1505 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
1506 * head and tail exist but @buf_len == align and @tail_buf == @buf.
1508 typedef struct BdrvRequestPadding
{
1515 QEMUIOVector local_qiov
;
1516 } BdrvRequestPadding
;
1518 static bool bdrv_init_padding(BlockDriverState
*bs
,
1519 int64_t offset
, int64_t bytes
,
1520 BdrvRequestPadding
*pad
)
1522 uint64_t align
= bs
->bl
.request_alignment
;
1525 memset(pad
, 0, sizeof(*pad
));
1527 pad
->head
= offset
& (align
- 1);
1528 pad
->tail
= ((offset
+ bytes
) & (align
- 1));
1530 pad
->tail
= align
- pad
->tail
;
1533 if ((!pad
->head
&& !pad
->tail
) || !bytes
) {
1537 sum
= pad
->head
+ bytes
+ pad
->tail
;
1538 pad
->buf_len
= (sum
> align
&& pad
->head
&& pad
->tail
) ? 2 * align
: align
;
1539 pad
->buf
= qemu_blockalign(bs
, pad
->buf_len
);
1540 pad
->merge_reads
= sum
== pad
->buf_len
;
1542 pad
->tail_buf
= pad
->buf
+ pad
->buf_len
- align
;
1548 static int bdrv_padding_rmw_read(BdrvChild
*child
,
1549 BdrvTrackedRequest
*req
,
1550 BdrvRequestPadding
*pad
,
1553 QEMUIOVector local_qiov
;
1554 BlockDriverState
*bs
= child
->bs
;
1555 uint64_t align
= bs
->bl
.request_alignment
;
1558 assert(req
->serialising
&& pad
->buf
);
1560 if (pad
->head
|| pad
->merge_reads
) {
1561 uint64_t bytes
= pad
->merge_reads
? pad
->buf_len
: align
;
1563 qemu_iovec_init_buf(&local_qiov
, pad
->buf
, bytes
);
1566 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_HEAD
);
1568 if (pad
->merge_reads
&& pad
->tail
) {
1569 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1571 ret
= bdrv_aligned_preadv(child
, req
, req
->overlap_offset
, bytes
,
1572 align
, &local_qiov
, 0, 0);
1577 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_HEAD
);
1579 if (pad
->merge_reads
&& pad
->tail
) {
1580 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1583 if (pad
->merge_reads
) {
1589 qemu_iovec_init_buf(&local_qiov
, pad
->tail_buf
, align
);
1591 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1592 ret
= bdrv_aligned_preadv(
1594 req
->overlap_offset
+ req
->overlap_bytes
- align
,
1595 align
, align
, &local_qiov
, 0, 0);
1599 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1604 memset(pad
->buf
+ pad
->head
, 0, pad
->buf_len
- pad
->head
- pad
->tail
);
1610 static void bdrv_padding_destroy(BdrvRequestPadding
*pad
)
1613 qemu_vfree(pad
->buf
);
1614 qemu_iovec_destroy(&pad
->local_qiov
);
1621 * Exchange request parameters with padded request if needed. Don't include RMW
1622 * read of padding, bdrv_padding_rmw_read() should be called separately if
1625 * All parameters except @bs are in-out: they represent original request at
1626 * function call and padded (if padding needed) at function finish.
1628 * Function always succeeds.
1630 static bool bdrv_pad_request(BlockDriverState
*bs
,
1631 QEMUIOVector
**qiov
, size_t *qiov_offset
,
1632 int64_t *offset
, unsigned int *bytes
,
1633 BdrvRequestPadding
*pad
)
1635 if (!bdrv_init_padding(bs
, *offset
, *bytes
, pad
)) {
1639 qemu_iovec_init_extended(&pad
->local_qiov
, pad
->buf
, pad
->head
,
1640 *qiov
, *qiov_offset
, *bytes
,
1641 pad
->buf
+ pad
->buf_len
- pad
->tail
, pad
->tail
);
1642 *bytes
+= pad
->head
+ pad
->tail
;
1643 *offset
-= pad
->head
;
1644 *qiov
= &pad
->local_qiov
;
1650 int coroutine_fn
bdrv_co_preadv(BdrvChild
*child
,
1651 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
1652 BdrvRequestFlags flags
)
1654 return bdrv_co_preadv_part(child
, offset
, bytes
, qiov
, 0, flags
);
1657 int coroutine_fn
bdrv_co_preadv_part(BdrvChild
*child
,
1658 int64_t offset
, unsigned int bytes
,
1659 QEMUIOVector
*qiov
, size_t qiov_offset
,
1660 BdrvRequestFlags flags
)
1662 BlockDriverState
*bs
= child
->bs
;
1663 BdrvTrackedRequest req
;
1664 BdrvRequestPadding pad
;
1667 trace_bdrv_co_preadv(bs
, offset
, bytes
, flags
);
1669 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
1674 bdrv_inc_in_flight(bs
);
1676 /* Don't do copy-on-read if we read data before write operation */
1677 if (atomic_read(&bs
->copy_on_read
) && !(flags
& BDRV_REQ_NO_SERIALISING
)) {
1678 flags
|= BDRV_REQ_COPY_ON_READ
;
1681 bdrv_pad_request(bs
, &qiov
, &qiov_offset
, &offset
, &bytes
, &pad
);
1683 tracked_request_begin(&req
, bs
, offset
, bytes
, BDRV_TRACKED_READ
);
1684 ret
= bdrv_aligned_preadv(child
, &req
, offset
, bytes
,
1685 bs
->bl
.request_alignment
,
1686 qiov
, qiov_offset
, flags
);
1687 tracked_request_end(&req
);
1688 bdrv_dec_in_flight(bs
);
1690 bdrv_padding_destroy(&pad
);
1695 static int coroutine_fn
bdrv_co_do_pwrite_zeroes(BlockDriverState
*bs
,
1696 int64_t offset
, int bytes
, BdrvRequestFlags flags
)
1698 BlockDriver
*drv
= bs
->drv
;
1702 bool need_flush
= false;
1706 int max_write_zeroes
= MIN_NON_ZERO(bs
->bl
.max_pwrite_zeroes
, INT_MAX
);
1707 int alignment
= MAX(bs
->bl
.pwrite_zeroes_alignment
,
1708 bs
->bl
.request_alignment
);
1709 int max_transfer
= MIN_NON_ZERO(bs
->bl
.max_transfer
, MAX_BOUNCE_BUFFER
);
1715 if ((flags
& ~bs
->supported_zero_flags
) & BDRV_REQ_NO_FALLBACK
) {
1719 assert(alignment
% bs
->bl
.request_alignment
== 0);
1720 head
= offset
% alignment
;
1721 tail
= (offset
+ bytes
) % alignment
;
1722 max_write_zeroes
= QEMU_ALIGN_DOWN(max_write_zeroes
, alignment
);
1723 assert(max_write_zeroes
>= bs
->bl
.request_alignment
);
1725 while (bytes
> 0 && !ret
) {
1728 /* Align request. Block drivers can expect the "bulk" of the request
1729 * to be aligned, and that unaligned requests do not cross cluster
1733 /* Make a small request up to the first aligned sector. For
1734 * convenience, limit this request to max_transfer even if
1735 * we don't need to fall back to writes. */
1736 num
= MIN(MIN(bytes
, max_transfer
), alignment
- head
);
1737 head
= (head
+ num
) % alignment
;
1738 assert(num
< max_write_zeroes
);
1739 } else if (tail
&& num
> alignment
) {
1740 /* Shorten the request to the last aligned sector. */
1744 /* limit request size */
1745 if (num
> max_write_zeroes
) {
1746 num
= max_write_zeroes
;
1750 /* First try the efficient write zeroes operation */
1751 if (drv
->bdrv_co_pwrite_zeroes
) {
1752 ret
= drv
->bdrv_co_pwrite_zeroes(bs
, offset
, num
,
1753 flags
& bs
->supported_zero_flags
);
1754 if (ret
!= -ENOTSUP
&& (flags
& BDRV_REQ_FUA
) &&
1755 !(bs
->supported_zero_flags
& BDRV_REQ_FUA
)) {
1759 assert(!bs
->supported_zero_flags
);
1762 if (ret
== -ENOTSUP
&& !(flags
& BDRV_REQ_NO_FALLBACK
)) {
1763 /* Fall back to bounce buffer if write zeroes is unsupported */
1764 BdrvRequestFlags write_flags
= flags
& ~BDRV_REQ_ZERO_WRITE
;
1766 if ((flags
& BDRV_REQ_FUA
) &&
1767 !(bs
->supported_write_flags
& BDRV_REQ_FUA
)) {
1768 /* No need for bdrv_driver_pwrite() to do a fallback
1769 * flush on each chunk; use just one at the end */
1770 write_flags
&= ~BDRV_REQ_FUA
;
1773 num
= MIN(num
, max_transfer
);
1775 buf
= qemu_try_blockalign0(bs
, num
);
1781 qemu_iovec_init_buf(&qiov
, buf
, num
);
1783 ret
= bdrv_driver_pwritev(bs
, offset
, num
, &qiov
, 0, write_flags
);
1785 /* Keep bounce buffer around if it is big enough for all
1786 * all future requests.
1788 if (num
< max_transfer
) {
1799 if (ret
== 0 && need_flush
) {
1800 ret
= bdrv_co_flush(bs
);
1806 static inline int coroutine_fn
1807 bdrv_co_write_req_prepare(BdrvChild
*child
, int64_t offset
, uint64_t bytes
,
1808 BdrvTrackedRequest
*req
, int flags
)
1810 BlockDriverState
*bs
= child
->bs
;
1812 int64_t end_sector
= DIV_ROUND_UP(offset
+ bytes
, BDRV_SECTOR_SIZE
);
1814 if (bs
->read_only
) {
1818 /* BDRV_REQ_NO_SERIALISING is only for read operation */
1819 assert(!(flags
& BDRV_REQ_NO_SERIALISING
));
1820 assert(!(bs
->open_flags
& BDRV_O_INACTIVE
));
1821 assert((bs
->open_flags
& BDRV_O_NO_IO
) == 0);
1822 assert(!(flags
& ~BDRV_REQ_MASK
));
1824 if (flags
& BDRV_REQ_SERIALISING
) {
1825 mark_request_serialising(req
, bdrv_get_cluster_size(bs
));
1828 waited
= wait_serialising_requests(req
);
1830 assert(!waited
|| !req
->serialising
||
1831 is_request_serialising_and_aligned(req
));
1832 assert(req
->overlap_offset
<= offset
);
1833 assert(offset
+ bytes
<= req
->overlap_offset
+ req
->overlap_bytes
);
1834 assert(end_sector
<= bs
->total_sectors
|| child
->perm
& BLK_PERM_RESIZE
);
1836 switch (req
->type
) {
1837 case BDRV_TRACKED_WRITE
:
1838 case BDRV_TRACKED_DISCARD
:
1839 if (flags
& BDRV_REQ_WRITE_UNCHANGED
) {
1840 assert(child
->perm
& (BLK_PERM_WRITE_UNCHANGED
| BLK_PERM_WRITE
));
1842 assert(child
->perm
& BLK_PERM_WRITE
);
1844 return notifier_with_return_list_notify(&bs
->before_write_notifiers
,
1846 case BDRV_TRACKED_TRUNCATE
:
1847 assert(child
->perm
& BLK_PERM_RESIZE
);
1854 static inline void coroutine_fn
1855 bdrv_co_write_req_finish(BdrvChild
*child
, int64_t offset
, uint64_t bytes
,
1856 BdrvTrackedRequest
*req
, int ret
)
1858 int64_t end_sector
= DIV_ROUND_UP(offset
+ bytes
, BDRV_SECTOR_SIZE
);
1859 BlockDriverState
*bs
= child
->bs
;
1861 atomic_inc(&bs
->write_gen
);
1864 * Discard cannot extend the image, but in error handling cases, such as
1865 * when reverting a qcow2 cluster allocation, the discarded range can pass
1866 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
1867 * here. Instead, just skip it, since semantically a discard request
1868 * beyond EOF cannot expand the image anyway.
1871 (req
->type
== BDRV_TRACKED_TRUNCATE
||
1872 end_sector
> bs
->total_sectors
) &&
1873 req
->type
!= BDRV_TRACKED_DISCARD
) {
1874 bs
->total_sectors
= end_sector
;
1875 bdrv_parent_cb_resize(bs
);
1876 bdrv_dirty_bitmap_truncate(bs
, end_sector
<< BDRV_SECTOR_BITS
);
1879 switch (req
->type
) {
1880 case BDRV_TRACKED_WRITE
:
1881 stat64_max(&bs
->wr_highest_offset
, offset
+ bytes
);
1882 /* fall through, to set dirty bits */
1883 case BDRV_TRACKED_DISCARD
:
1884 bdrv_set_dirty(bs
, offset
, bytes
);
1893 * Forwards an already correctly aligned write request to the BlockDriver,
1894 * after possibly fragmenting it.
1896 static int coroutine_fn
bdrv_aligned_pwritev(BdrvChild
*child
,
1897 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
1898 int64_t align
, QEMUIOVector
*qiov
, size_t qiov_offset
, int flags
)
1900 BlockDriverState
*bs
= child
->bs
;
1901 BlockDriver
*drv
= bs
->drv
;
1904 uint64_t bytes_remaining
= bytes
;
1911 if (bdrv_has_readonly_bitmaps(bs
)) {
1915 assert(is_power_of_2(align
));
1916 assert((offset
& (align
- 1)) == 0);
1917 assert((bytes
& (align
- 1)) == 0);
1918 assert(!qiov
|| qiov_offset
+ bytes
<= qiov
->size
);
1919 max_transfer
= QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs
->bl
.max_transfer
, INT_MAX
),
1922 ret
= bdrv_co_write_req_prepare(child
, offset
, bytes
, req
, flags
);
1924 if (!ret
&& bs
->detect_zeroes
!= BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF
&&
1925 !(flags
& BDRV_REQ_ZERO_WRITE
) && drv
->bdrv_co_pwrite_zeroes
&&
1926 qemu_iovec_is_zero(qiov
, qiov_offset
, bytes
)) {
1927 flags
|= BDRV_REQ_ZERO_WRITE
;
1928 if (bs
->detect_zeroes
== BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP
) {
1929 flags
|= BDRV_REQ_MAY_UNMAP
;
1934 /* Do nothing, write notifier decided to fail this request */
1935 } else if (flags
& BDRV_REQ_ZERO_WRITE
) {
1936 bdrv_debug_event(bs
, BLKDBG_PWRITEV_ZERO
);
1937 ret
= bdrv_co_do_pwrite_zeroes(bs
, offset
, bytes
, flags
);
1938 } else if (flags
& BDRV_REQ_WRITE_COMPRESSED
) {
1939 ret
= bdrv_driver_pwritev_compressed(bs
, offset
, bytes
,
1941 } else if (bytes
<= max_transfer
) {
1942 bdrv_debug_event(bs
, BLKDBG_PWRITEV
);
1943 ret
= bdrv_driver_pwritev(bs
, offset
, bytes
, qiov
, qiov_offset
, flags
);
1945 bdrv_debug_event(bs
, BLKDBG_PWRITEV
);
1946 while (bytes_remaining
) {
1947 int num
= MIN(bytes_remaining
, max_transfer
);
1948 int local_flags
= flags
;
1951 if (num
< bytes_remaining
&& (flags
& BDRV_REQ_FUA
) &&
1952 !(bs
->supported_write_flags
& BDRV_REQ_FUA
)) {
1953 /* If FUA is going to be emulated by flush, we only
1954 * need to flush on the last iteration */
1955 local_flags
&= ~BDRV_REQ_FUA
;
1958 ret
= bdrv_driver_pwritev(bs
, offset
+ bytes
- bytes_remaining
,
1959 num
, qiov
, bytes
- bytes_remaining
,
1964 bytes_remaining
-= num
;
1967 bdrv_debug_event(bs
, BLKDBG_PWRITEV_DONE
);
1972 bdrv_co_write_req_finish(child
, offset
, bytes
, req
, ret
);
1977 static int coroutine_fn
bdrv_co_do_zero_pwritev(BdrvChild
*child
,
1980 BdrvRequestFlags flags
,
1981 BdrvTrackedRequest
*req
)
1983 BlockDriverState
*bs
= child
->bs
;
1984 QEMUIOVector local_qiov
;
1985 uint64_t align
= bs
->bl
.request_alignment
;
1988 BdrvRequestPadding pad
;
1990 padding
= bdrv_init_padding(bs
, offset
, bytes
, &pad
);
1992 mark_request_serialising(req
, align
);
1993 wait_serialising_requests(req
);
1995 bdrv_padding_rmw_read(child
, req
, &pad
, true);
1997 if (pad
.head
|| pad
.merge_reads
) {
1998 int64_t aligned_offset
= offset
& ~(align
- 1);
1999 int64_t write_bytes
= pad
.merge_reads
? pad
.buf_len
: align
;
2001 qemu_iovec_init_buf(&local_qiov
, pad
.buf
, write_bytes
);
2002 ret
= bdrv_aligned_pwritev(child
, req
, aligned_offset
, write_bytes
,
2003 align
, &local_qiov
, 0,
2004 flags
& ~BDRV_REQ_ZERO_WRITE
);
2005 if (ret
< 0 || pad
.merge_reads
) {
2006 /* Error or all work is done */
2009 offset
+= write_bytes
- pad
.head
;
2010 bytes
-= write_bytes
- pad
.head
;
2014 assert(!bytes
|| (offset
& (align
- 1)) == 0);
2015 if (bytes
>= align
) {
2016 /* Write the aligned part in the middle. */
2017 uint64_t aligned_bytes
= bytes
& ~(align
- 1);
2018 ret
= bdrv_aligned_pwritev(child
, req
, offset
, aligned_bytes
, align
,
2023 bytes
-= aligned_bytes
;
2024 offset
+= aligned_bytes
;
2027 assert(!bytes
|| (offset
& (align
- 1)) == 0);
2029 assert(align
== pad
.tail
+ bytes
);
2031 qemu_iovec_init_buf(&local_qiov
, pad
.tail_buf
, align
);
2032 ret
= bdrv_aligned_pwritev(child
, req
, offset
, align
, align
,
2034 flags
& ~BDRV_REQ_ZERO_WRITE
);
2038 bdrv_padding_destroy(&pad
);
2044 * Handle a write request in coroutine context
2046 int coroutine_fn
bdrv_co_pwritev(BdrvChild
*child
,
2047 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
2048 BdrvRequestFlags flags
)
2050 return bdrv_co_pwritev_part(child
, offset
, bytes
, qiov
, 0, flags
);
2053 int coroutine_fn
bdrv_co_pwritev_part(BdrvChild
*child
,
2054 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
, size_t qiov_offset
,
2055 BdrvRequestFlags flags
)
2057 BlockDriverState
*bs
= child
->bs
;
2058 BdrvTrackedRequest req
;
2059 uint64_t align
= bs
->bl
.request_alignment
;
2060 BdrvRequestPadding pad
;
2063 trace_bdrv_co_pwritev(child
->bs
, offset
, bytes
, flags
);
2069 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
2074 bdrv_inc_in_flight(bs
);
2076 * Align write if necessary by performing a read-modify-write cycle.
2077 * Pad qiov with the read parts and be sure to have a tracked request not
2078 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
2080 tracked_request_begin(&req
, bs
, offset
, bytes
, BDRV_TRACKED_WRITE
);
2082 if (flags
& BDRV_REQ_ZERO_WRITE
) {
2083 ret
= bdrv_co_do_zero_pwritev(child
, offset
, bytes
, flags
, &req
);
2087 if (bdrv_pad_request(bs
, &qiov
, &qiov_offset
, &offset
, &bytes
, &pad
)) {
2088 mark_request_serialising(&req
, align
);
2089 wait_serialising_requests(&req
);
2090 bdrv_padding_rmw_read(child
, &req
, &pad
, false);
2093 ret
= bdrv_aligned_pwritev(child
, &req
, offset
, bytes
, align
,
2094 qiov
, qiov_offset
, flags
);
2096 bdrv_padding_destroy(&pad
);
2099 tracked_request_end(&req
);
2100 bdrv_dec_in_flight(bs
);
2105 int coroutine_fn
bdrv_co_pwrite_zeroes(BdrvChild
*child
, int64_t offset
,
2106 int bytes
, BdrvRequestFlags flags
)
2108 trace_bdrv_co_pwrite_zeroes(child
->bs
, offset
, bytes
, flags
);
2110 if (!(child
->bs
->open_flags
& BDRV_O_UNMAP
)) {
2111 flags
&= ~BDRV_REQ_MAY_UNMAP
;
2114 return bdrv_co_pwritev(child
, offset
, bytes
, NULL
,
2115 BDRV_REQ_ZERO_WRITE
| flags
);
2119 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
2121 int bdrv_flush_all(void)
2123 BdrvNextIterator it
;
2124 BlockDriverState
*bs
= NULL
;
2127 for (bs
= bdrv_first(&it
); bs
; bs
= bdrv_next(&it
)) {
2128 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
2131 aio_context_acquire(aio_context
);
2132 ret
= bdrv_flush(bs
);
2133 if (ret
< 0 && !result
) {
2136 aio_context_release(aio_context
);
2143 typedef struct BdrvCoBlockStatusData
{
2144 BlockDriverState
*bs
;
2145 BlockDriverState
*base
;
2151 BlockDriverState
**file
;
2154 } BdrvCoBlockStatusData
;
2156 int coroutine_fn
bdrv_co_block_status_from_file(BlockDriverState
*bs
,
2162 BlockDriverState
**file
)
2164 assert(bs
->file
&& bs
->file
->bs
);
2167 *file
= bs
->file
->bs
;
2168 return BDRV_BLOCK_RAW
| BDRV_BLOCK_OFFSET_VALID
;
2171 int coroutine_fn
bdrv_co_block_status_from_backing(BlockDriverState
*bs
,
2177 BlockDriverState
**file
)
2179 assert(bs
->backing
&& bs
->backing
->bs
);
2182 *file
= bs
->backing
->bs
;
2183 return BDRV_BLOCK_RAW
| BDRV_BLOCK_OFFSET_VALID
;
2187 * Returns the allocation status of the specified sectors.
2188 * Drivers not implementing the functionality are assumed to not support
2189 * backing files, hence all their sectors are reported as allocated.
2191 * If 'want_zero' is true, the caller is querying for mapping
2192 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2193 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2194 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2196 * If 'offset' is beyond the end of the disk image the return value is
2197 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2199 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2200 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2201 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2203 * 'pnum' is set to the number of bytes (including and immediately
2204 * following the specified offset) that are easily known to be in the
2205 * same allocated/unallocated state. Note that a second call starting
2206 * at the original offset plus returned pnum may have the same status.
2207 * The returned value is non-zero on success except at end-of-file.
2209 * Returns negative errno on failure. Otherwise, if the
2210 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2211 * set to the host mapping and BDS corresponding to the guest offset.
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    if (!bs->drv->bdrv_co_block_status) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                        aligned_bytes, pnum, &local_map,
                                        &local_file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (want_zero) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t size2 = bdrv_getlength(bs2);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                                   BlockDriverState *base,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    BlockDriverState *p;
    int ret = 0;
    bool first = true;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        if (ret < 0) {
            break;
        }
        if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
            /*
             * Reading beyond the end of the file continues to read
             * zeroes, but we can only widen the result to the
             * unallocated length we learned from an earlier
             * iteration.
             */
            *pnum = bytes;
        }
        if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
            break;
        }
        /* [offset, pnum] unallocated on this layer, which could be only
         * the first part of [offset, bytes].  */
        bytes = MIN(bytes, *pnum);
        first = false;
    }
    return ret;
}

/* Coroutine wrapper for bdrv_block_status_above() */
static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
{
    BdrvCoBlockStatusData *data = opaque;

    data->ret = bdrv_co_block_status_above(data->bs, data->base,
                                           data->want_zero,
                                           data->offset, data->bytes,
                                           data->pnum, data->map, data->file);
    data->done = true;
    aio_wait_kick();
}

/*
 * Synchronous wrapper around bdrv_co_block_status_above().
 *
 * See bdrv_co_block_status_above() for details.
 */
static int bdrv_common_block_status_above(BlockDriverState *bs,
                                          BlockDriverState *base,
                                          bool want_zero, int64_t offset,
                                          int64_t bytes, int64_t *pnum,
                                          int64_t *map,
                                          BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoBlockStatusData data = {
        .bs = bs,
        .base = base,
        .want_zero = want_zero,
        .offset = offset,
        .bytes = bytes,
        .pnum = pnum,
        .map = map,
        .file = file,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, !data.done);
    }

    return data.ret;
}
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, true, offset, bytes,
                                          pnum, map, file);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, backing_bs(bs),
                                   offset, bytes, pnum, map, file);
}
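/*
 * Example (illustrative sketch): walking the allocation map of a node with
 * bdrv_block_status().  The helper name, the use of printf() and the choice
 * to stop on the first error are hypothetical, not taken from this file.
 *
 *     static void example_dump_block_status(BlockDriverState *bs)
 *     {
 *         int64_t offset = 0;
 *         int64_t total = bdrv_getlength(bs);
 *
 *         while (total >= 0 && offset < total) {
 *             int64_t pnum, map;
 *             BlockDriverState *file;
 *             int ret = bdrv_block_status(bs, offset, total - offset,
 *                                         &pnum, &map, &file);
 *             if (ret < 0 || pnum == 0) {
 *                 break;
 *             }
 *             printf("%" PRId64 "+%" PRId64 ": %s%s\n", offset, pnum,
 *                    (ret & BDRV_BLOCK_ALLOCATED) ? "allocated" : "unallocated",
 *                    (ret & BDRV_BLOCK_ZERO) ? " (zero)" : "");
 *             offset += pnum;
 *         }
 *     }
 */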
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return 1 if (a prefix of) the given range is allocated in any image
 * between BASE and TOP (BASE is only included if include_base is set).
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    BlockDriverState *intermediate;
    int ret;
    int64_t n = bytes;

    assert(base || !include_base);

    intermediate = top;
    while (include_base || intermediate != base) {
        int64_t pnum_inter;
        int64_t size_inter;

        assert(intermediate);
        ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        size_inter = bdrv_getlength(intermediate);
        if (size_inter < 0) {
            return size_inter;
        }
        if (n > pnum_inter &&
            (intermediate == top || offset + pnum_inter < size_inter)) {
            n = pnum_inter;
        }

        if (intermediate == base) {
            break;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
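/*
 * Example (illustrative sketch): asking whether a chain contributes data for
 * a range before copying it, as a commit/stream-style caller might.  The
 * variable names and the surrounding policy are hypothetical.
 *
 *     int64_t pnum;
 *     int ret = bdrv_is_allocated_above(top, base, false, offset, bytes,
 *                                       &pnum);
 *     if (ret < 0) {
 *         return ret;                    // error
 *     } else if (ret) {
 *         // the first pnum bytes are allocated above base and must be copied
 *     } else {
 *         // the first pnum bytes are not allocated anywhere above base
 *     }
 */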
typedef struct BdrvVmstateCo {
    BlockDriverState    *bs;
    QEMUIOVector        *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;
    int ret = -ENOTSUP;

    bdrv_inc_in_flight(bs);

    if (!drv) {
        ret = -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        if (is_read) {
            ret = drv->bdrv_load_vmstate(bs, qiov, pos);
        } else {
            ret = drv->bdrv_save_vmstate(bs, qiov, pos);
        }
    } else if (bs->file) {
        ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    bdrv_dec_in_flight(bs);
    return ret;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;

    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
    aio_wait_kick();
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
        return data.ret;
    }
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
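/*
 * Example (illustrative sketch): storing and re-reading a small blob in the
 * vmstate area of a node, as migration code does for the VM state.  The
 * buffer size and offset are hypothetical; both buffer-based wrappers return
 * the number of bytes on success and a negative errno on error.
 *
 *     uint8_t buf[512];
 *
 *     ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
 */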
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
    aio_wait_kick();
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = atomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}
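/*
 * Example (illustrative sketch): making completed writes stable on disk from
 * non-coroutine context.  The child variable and the assumption that the
 * data was written with bdrv_pwrite() are hypothetical.
 *
 *     ret = bdrv_pwrite(child, offset, buf, bytes);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = bdrv_flush(child->bs);       // 0 on success, -errno on failure
 */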
typedef struct DiscardCo {
    BdrvChild *child;
    int64_t offset;
    int64_t bytes;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
    aio_wait_kick();
}

int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;
    BlockDriverState *bs = child->bs;

    if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
        return -EIO;
    }

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }

        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
{
    Coroutine *co;
    DiscardCo rwco = {
        .child = child,
        .offset = offset,
        .bytes = bytes,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}
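/*
 * Example (illustrative sketch): discarding a range the guest no longer
 * needs.  The child variable and range are hypothetical; discard is advisory,
 * so success does not guarantee that the blocks were actually unmapped, and
 * unsupported or disabled discard simply returns 0 above.
 *
 *     ret = bdrv_pdiscard(child, offset, bytes);
 *     if (ret < 0) {
 *         // genuine I/O error
 *     }
 */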
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
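/*
 * Example (illustrative sketch): allocating a bounce buffer that satisfies
 * the node's memory alignment and can fail gracefully.  The buffer size is
 * hypothetical.
 *
 *     void *bounce = qemu_try_blockalign(bs, 64 * 1024);
 *     if (bounce == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(bounce);
 */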
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_register_buf) {
        bs->drv->bdrv_register_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_register_buf(child->bs, host, size);
    }
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host);
    }
}
static int coroutine_fn bdrv_co_copy_range_internal(
        BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
        uint64_t dst_offset, uint64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));

    if (!dst || !dst->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
            wait_serialising_requests(&req);
        }

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}
/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         uint64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       uint64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
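/*
 * Example (illustrative sketch): offloading a copy between two children from
 * coroutine context, with a caller-side fallback.  The child variables,
 * offsets and the fallback policy are hypothetical.
 *
 *     ret = bdrv_co_copy_range(src_child, src_offset, dst_child, dst_offset,
 *                              bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         // fall back to a bounce-buffer read/write pair
 *     }
 */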
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->resize) {
            c->role->resize(c);
        }
    }
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
                                  PreallocMode prealloc, Error **errp)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        mark_request_serialising(&req, 1);
    }
    if (bs->read_only) {
        error_setg(errp, "Image is read-only");
        ret = -EACCES;
        goto out;
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    if (!drv->bdrv_co_truncate) {
        if (bs->file && drv->is_filter) {
            ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
            goto out;
        }
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }

    ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
    if (ret < 0) {
        goto out;
    }
    ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /* It's possible that truncation succeeded but refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
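/*
 * Example (illustrative sketch): growing an image without preallocation via
 * the synchronous wrapper defined below.  The new size and the error handling
 * policy are hypothetical.
 *
 *     Error *local_err = NULL;
 *
 *     ret = bdrv_truncate(child, new_size, PREALLOC_MODE_OFF, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */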
typedef struct TruncateCo {
    BdrvChild *child;
    int64_t offset;
    PreallocMode prealloc;
    Error **errp;
    int ret;
} TruncateCo;

static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
{
    TruncateCo *tco = opaque;

    tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
                                tco->errp);
    aio_wait_kick();
}

int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
                  Error **errp)
{
    Coroutine *co;
    TruncateCo tco = {
        .child      = child,
        .offset     = offset,
        .prealloc   = prealloc,
        .errp       = errp,
        .ret        = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_truncate_co_entry(&tco);
    } else {
        co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE
);