/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                               bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                             bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->role->drained_poll) {
        return c->role->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    if (c->role->drained_begin) {
        c->role->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
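
/*
 * Illustrative sketch (not from the QEMU sources): the merge rules widen
 * alignments to the strictest value (MAX) and shrink transfer sizes and
 * iovec counts to the most restrictive non-zero value (MIN_NON_ZERO), so
 * zero continues to mean "no limit". For example:
 *
 *     BlockLimits a = { .opt_transfer = 65536, .max_transfer = 0,
 *                       .min_mem_alignment = 512, .max_iov = 1024 };
 *     BlockLimits b = { .opt_transfer = 4096, .max_transfer = 131072,
 *                       .min_mem_alignment = 4096, .max_iov = 0 };
 *     bdrv_merge_limits(&a, &b);
 *     now a.opt_transfer == 65536, a.max_transfer == 131072,
 *         a.min_mem_alignment == 4096 and a.max_iov == 1024
 */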
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv) ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
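
/*
 * Usage sketch (illustrative, not from the QEMU sources): since the flag is
 * a counter, two independent users can overlap without clobbering each
 * other's state:
 *
 *     bdrv_enable_copy_on_read(bs);     counter 0 -> 1, CoR active
 *     bdrv_enable_copy_on_read(bs);     counter 1 -> 2, still active
 *     bdrv_disable_copy_on_read(bs);    counter 2 -> 1, still active
 *     bdrv_disable_copy_on_read(bs);    counter 1 -> 0, CoR off again
 */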
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done before reading bs->wakeup. */
    atomic_mb_set(&data->done, true);
    bdrv_dec_in_flight(bs);

    if (data->begin) {
        g_free(data);
    }
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin
    };

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);

    if (!begin) {
        BDRV_POLL_WHILE(bs, !data->done);
        g_free(data);
    }
}
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents);
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
    };
    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;
    int old_quiesce_counter;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false);
        return;
    }
    assert(bs->quiesce_counter > 0);
    old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    bdrv_do_drained_end(bs, false, NULL, false);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    bdrv_do_drained_end(bs, true, NULL, false);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false);
    }
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
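
/*
 * Typical drained-section pattern (illustrative sketch, not from the QEMU
 * sources): quiesce a node, perform a graph or state change that must not
 * race with in-flight I/O, then resume. The sections nest, so a caller does
 * not need to know whether the node is already drained:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify bs or its children while no requests are in flight ...
 *     bdrv_drained_end(bs);
 */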
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(atomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
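
/*
 * Worked example (illustrative): with align = 4096, a request with
 * offset = 4097 and bytes = 100 gets
 *
 *     overlap_offset = 4097 & ~4095                 = 4096
 *     overlap_bytes  = ROUND_UP(4197, 4096) - 4096  = 4096
 *
 * so the serialising window covers the whole block [4096, 8192), and any
 * overlapping request touching that block will wait.
 */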
static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
{
    /*
     * If the request is serialising, overlap_offset and overlap_bytes are set,
     * so we can check if the request is aligned. Otherwise, don't care and
     * return false.
     */

    return req->serialising && (req->offset == req->overlap_offset) &&
           (req->bytes == req->overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
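
/*
 * Worked example (illustrative): with a 4 KiB cluster size, a request at
 * offset = 4097 with bytes = 100 yields
 *
 *     *cluster_offset = QEMU_ALIGN_DOWN(4097, 4096)    = 4096
 *     *cluster_bytes  = QEMU_ALIGN_UP(1 + 100, 4096)   = 4096
 *
 * i.e. the containing cluster [4096, 8192). A request straddling a cluster
 * boundary is widened to a multiple of the cluster size.
 */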
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
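
/*
 * In other words, the two half-open intervals [offset, offset + bytes) and
 * [overlap_offset, overlap_offset + overlap_bytes) intersect. E.g. a window
 * of [4096, 8192) overlaps a request at [8000, 9000) but not one at
 * [8192, 12288).
 */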
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
    aio_wait_kick();
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
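
/*
 * Usage sketch (illustrative, not from the QEMU sources): zero out a whole
 * image while letting the driver punch holes where possible:
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 *
 * (child stands for whatever BdrvChild the caller holds.)
 */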
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_pwritev(child, offset, &qiov);
}
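
/*
 * Usage sketch (illustrative, not from the QEMU sources): the synchronous
 * byte-based helpers pair naturally for a read-modify-write of small
 * metadata, e.g. a 512-byte header:
 *
 *     uint8_t header[512];
 *     if (bdrv_pread(child, 0, header, sizeof(header)) < 0) {
 *         ... handle error ...
 *     }
 *     header[8] = 1;                       update some field
 *     if (bdrv_pwrite(child, 0, header, sizeof(header)) < 0) {
 *         ... handle error ...
 *     }
 */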
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
    assert(drv->bdrv_co_readv);

    return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    bounce_buffer = qemu_try_blockalign(bs,
                                        MIN(MIN(max_transfer, cluster_bytes),
                                            MAX_BOUNCE_BUFFER));
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    while (cluster_bytes) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, cluster_offset,
                                MIN(cluster_bytes, max_transfer), &pnum);
        if (ret < 0) {
            /* Safe to treat errors in querying allocation as if
             * unallocated; we'll probably fail again soon on the
             * read, but at least that will set a decent errno.
             */
            pnum = MIN(cluster_bytes, max_transfer);
        }

        /* Stop at EOF if the image ends in the middle of the cluster */
        if (ret == 0 && pnum == 0) {
            assert(progress >= bytes);
            break;
        }

        assert(skip_bytes < pnum);

        if (ret <= 0) {
            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
                                pnum - skip_bytes);
        } else {
            /* Read directly into the destination */
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
            ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
                                     &local_qiov, 0);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    /* BDRV_REQ_SERIALISING is only for write operation */
    assert(!(flags & BDRV_REQ_SERIALISING));

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    trace_bdrv_co_preadv(child->bs, offset, bytes, flags);

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request. Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes. */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret < 0 && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * all future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
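
/*
 * Worked example (illustrative): with alignment = 4096 and a sufficiently
 * large max_transfer, a zeroing request at offset = 512 for bytes = 8192 is
 * issued as three pieces:
 *
 *     [ 512, 4096)   3584-byte head, up to the first aligned boundary
 *     [4096, 8192)   4096-byte aligned bulk
 *     [8192, 8704)   512-byte tail after the last aligned boundary
 *
 * Keeping the bulk aligned lets drivers honour pwrite_zeroes_alignment,
 * while the small head and tail pieces may fall back to bounce-buffer
 * writes.
 */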
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;
    bool waited;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);

    if (bs->read_only) {
        return -EPERM;
    }

    /* BDRV_REQ_NO_SERIALISING is only for read operation */
    assert(!(flags & BDRV_REQ_NO_SERIALISING));
    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));

    if (flags & BDRV_REQ_SERIALISING) {
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    waited = wait_serialising_requests(req);

    assert(!waited || !req->serialising ||
           is_request_serialising_and_aligned(req));
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
                                                req);
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}

static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    atomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (ret == 0) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    uint64_t bytes_remaining = bytes;
    int max_transfer;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = (align - (offset + bytes)) & (align - 1);

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        qemu_iovec_init_buf(&local_qiov, buf, align);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init_buf(&head_qiov, head_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_init_buf(&tail_qiov, tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
typedef struct BdrvCoBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    bool want_zero;
    int64_t offset;
    int64_t bytes;
    int64_t *pnum;
    int64_t *map;
    BlockDriverState **file;
    int ret;
    bool done;
} BdrvCoBlockStatusData;

int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
                                                bool want_zero,
                                                int64_t offset,
                                                int64_t bytes,
                                                int64_t *pnum,
                                                int64_t *map,
                                                BlockDriverState **file)
{
    assert(bs->file && bs->file->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->file->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}

int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    assert(bs->backing && bs->backing->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state. Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure. Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    if (!bs->drv->bdrv_co_block_status) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                        aligned_bytes, pnum, &local_map,
                                        &local_file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (want_zero) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t size2 = bdrv_getlength(bs2);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors. This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                                   BlockDriverState *base,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    BlockDriverState *p;
    int ret = 0;
    bool first = true;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        if (ret < 0) {
            break;
        }
        if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
            /*
             * Reading beyond the end of the file continues to read
             * zeroes, but we can only widen the result to the
             * unallocated length we learned from an earlier
             * iteration.
             */
            *pnum = bytes;
        }
        if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
            break;
        }
        /* [offset, pnum] unallocated on this layer, which could be only
         * the first part of [offset, bytes]. */
        bytes = MIN(bytes, *pnum);
        first = false;
    }
    return ret;
}

/* Coroutine wrapper for bdrv_block_status_above() */
static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
{
    BdrvCoBlockStatusData *data = opaque;

    data->ret = bdrv_co_block_status_above(data->bs, data->base,
                                           data->want_zero,
                                           data->offset, data->bytes,
                                           data->pnum, data->map, data->file);
    data->done = true;
    aio_wait_kick();
}

/*
 * Synchronous wrapper around bdrv_co_block_status_above().
 *
 * See bdrv_co_block_status_above() for details.
 */
static int bdrv_common_block_status_above(BlockDriverState *bs,
                                          BlockDriverState *base,
                                          bool want_zero, int64_t offset,
                                          int64_t bytes, int64_t *pnum,
                                          int64_t *map,
                                          BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoBlockStatusData data = {
        .bs = bs,
        .base = base,
        .want_zero = want_zero,
        .offset = offset,
        .bytes = bytes,
        .pnum = pnum,
        .map = map,
        .file = file,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, !data.done);
    }
    return data.ret;
}

int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, true, offset, bytes,
                                          pnum, map, file);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, backing_bs(bs),
                                   offset, bytes, pnum, map, file);
}
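
/*
 * Usage sketch (illustrative, not from the QEMU sources): callers typically
 * walk a range chunk by chunk, advancing by *pnum after each query:
 *
 *     int64_t offset = 0, pnum;
 *     while (offset < size) {
 *         int ret = bdrv_block_status(bs, offset, size - offset, &pnum,
 *                                     NULL, NULL);
 *         if (ret < 0) {
 *             break;
 *         }
 *         ... ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO) describes this chunk
 *         offset += pnum;
 *     }
 */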
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if (a prefix of) the given range is allocated in any image
 * between BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * offset is allocated in any image of the chain.  Return false otherwise,
 * or negative errno on failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum)
{
    BlockDriverState *intermediate;
    int ret;
    int64_t n = bytes;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int64_t pnum_inter;
        int64_t size_inter;

        ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        size_inter = bdrv_getlength(intermediate);
        if (size_inter < 0) {
            return size_inter;
        }
        if (n > pnum_inter &&
            (intermediate == top || offset + pnum_inter < size_inter)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
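/*
 * Illustrative call, assuming a chain base <- mid <- top: check whether
 * the first megabyte is allocated anywhere above (and excluding) base:
 *
 *     int64_t pnum;
 *     int ret = bdrv_is_allocated_above(top, base, 0, 1024 * 1024, &pnum);
 *
 * ret > 0 means a prefix of pnum bytes is allocated in top or mid;
 * ret == 0 means the first pnum bytes are unallocated all the way down
 * to (and excluding) base.
 */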
typedef struct BdrvVmstateCo {
    BlockDriverState    *bs;
    QEMUIOVector        *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;
    int ret = -ENOTSUP;

    bdrv_inc_in_flight(bs);

    if (!drv) {
        ret = -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        if (is_read) {
            ret = drv->bdrv_load_vmstate(bs, qiov, pos);
        } else {
            ret = drv->bdrv_save_vmstate(bs, qiov, pos);
        }
    } else if (bs->file) {
        ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    bdrv_dec_in_flight(bs);
    return ret;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
        return data.ret;
    }
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
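/*
 * The helpers above back the snapshot vmstate area.  A minimal round
 * trip might look like this (buffer contents and position are the
 * caller's; shown for illustration only):
 *
 *     uint8_t buf[512];
 *     int ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
 *     ...
 *     ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
 *
 * Both return the number of bytes transferred on success or a negative
 * errno.
 */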
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = atomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /* bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success) */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
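/*
 * Worked example of the write-generation optimisation above: two
 * back-to-back flushes with no intervening write share the same
 * bs->write_gen.  The first flush records it in bs->flushed_gen on
 * success; the second finds bs->flushed_gen == current_gen and skips
 * the flush-to-disk step entirely, jumping straight to flushing the
 * parent.
 */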
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}

typedef struct DiscardCo {
    BdrvChild *child;
    int64_t offset;
    int bytes;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
}
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;
    BlockDriverState *bs = child->bs;

    if (!bs || !bs->drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }

        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
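/*
 * Worked alignment example for the fragmentation above (assumed
 * limits: pdiscard_alignment = 64 KiB, request_alignment = 512).
 * Discarding offset = 10 KiB, bytes = 200 KiB gives head = 10 KiB and
 * tail = 210 KiB % 64 KiB = 18 KiB.  The first fragment is
 * MIN(200 KiB, 64 KiB - 10 KiB) = 54 KiB, which reaches the 64 KiB
 * boundary; the next fragment is shortened by the tail so that it ends
 * on the last aligned boundary; the remaining 18 KiB tail goes down as
 * its own request, which drivers may ignore with -ENOTSUP.
 */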
int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes)
{
    Coroutine *co;
    DiscardCo rwco = {
        .child = child,
        .offset = offset,
        .bytes = bytes,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
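/*
 * A minimal sketch of the try-variant in a caller that degrades
 * gracefully instead of aborting on allocation failure (illustrative
 * only):
 *
 *     void *bounce = qemu_try_blockalign(bs, len);
 *     if (bounce == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(bounce);
 */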
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}
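/*
 * Plug/unplug calls must pair up.  A submission loop batches requests
 * like this ('submit_one_request' is a hypothetical caller helper, not
 * part of this file):
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         submit_one_request(bs, &reqs[i]);
 *     }
 *     bdrv_io_unplug(bs);   // driver may now issue the queued batch
 */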
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_register_buf) {
        bs->drv->bdrv_register_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_register_buf(child->bs, host, size);
    }
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host);
    }
}
static int coroutine_fn bdrv_co_copy_range_internal(
        BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
        uint64_t dst_offset, uint64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));

    if (!dst || !dst->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
            wait_serialising_requests(&req);
        }

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}
/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         uint64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       uint64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
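/*
 * A minimal sketch of offloaded copying between two children
 * ('src_child' and 'dst_child' are hypothetical; must run in coroutine
 * context):
 *
 *     ret = bdrv_co_copy_range(src_child, 0, dst_child, 0,
 *                              1024 * 1024, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         ... fall back to a read/write bounce-buffer loop ...
 *     }
 */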
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->resize) {
            c->role->resize(c);
        }
    }
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
                                  PreallocMode prealloc, Error **errp)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        mark_request_serialising(&req, 1);
    }
    if (bs->read_only) {
        error_setg(errp, "Image is read-only");
        ret = -EACCES;
        goto out;
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    if (!drv->bdrv_co_truncate) {
        if (bs->file && drv->is_filter) {
            ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
            goto out;
        }
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }

    ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
    if (ret < 0) {
        goto out;
    }
    ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /* It's possible that truncation succeeded but refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
typedef struct TruncateCo {
    BdrvChild *child;
    int64_t offset;
    PreallocMode prealloc;
    Error **errp;
    int ret;
} TruncateCo;

static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
{
    TruncateCo *tco = opaque;
    tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
                                tco->errp);
}

int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
                  Error **errp)
{
    Coroutine *co;
    TruncateCo tco = {
        .child      = child,
        .offset     = offset,
        .prealloc   = prealloc,
        .errp       = errp,
        .ret        = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_truncate_co_entry(&tco);
    } else {
        co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
    }

    return tco.ret;
}
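/*
 * Usage sketch (illustrative): grow an image by 1 MiB without
 * preallocation, reporting errors through the Error API:
 *
 *     Error *local_err = NULL;
 *     int64_t size = bdrv_getlength(child->bs);
 *     int ret = bdrv_truncate(child, size + 1024 * 1024,
 *                             PREALLOC_MODE_OFF, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */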