/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
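
/*
 * Worked example (illustrative, not part of the original file): merging a
 * child with max_transfer = 0 (i.e. unlimited) into a parent limited to
 * 1 MiB keeps 1 MiB, because MIN_NON_ZERO() ignores zeroes; opt_transfer
 * takes the MAX, so a child preferring 64 KiB raises a parent that had no
 * preference (0) to 64 KiB.
 */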
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
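
/*
 * Illustrative sketch, not part of the original file: because copy-on-read
 * is a reference count, nested users simply pair enable/disable calls.
 * example_cor_user() is a hypothetical caller.
 */
#if 0
static void example_cor_user(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);   /* refcount 0 -> 1: feature turns on */
    bdrv_enable_copy_on_read(bs);   /* refcount 1 -> 2: still on */
    bdrv_disable_copy_on_read(bs);  /* refcount 2 -> 1: still on */
    bdrv_disable_copy_on_read(bs);  /* refcount 1 -> 0: feature turns off */
}
#endif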
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    atomic_mb_set(&data->done, true);
    if (!data->begin) {
        atomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}
/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        atomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}
/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}
static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}
/**
 * This function does not poll, nor must any of its recursively called
 * functions.  The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles.  Therefore, the pointer must remain valid
 * until the pointee reaches 0.  That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}
void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0);
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
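
/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * callers that need a node quiesced around a graph or state change.
 * example_drained_section() is a hypothetical helper.
 */
#if 0
static void example_drained_section(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);   /* quiesce parents, driver, in-flight I/O */
    /* ... safely reconfigure bs or its children here ... */
    bdrv_drained_end(bs);     /* resume request processing */
}
#endif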
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(atomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;
static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
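
/*
 * Illustrative sketch, not part of the original file:
 * bdrv_drain_all_begin() and bdrv_drain_all_end() must be strictly paired,
 * as in this hypothetical stop-the-world helper.
 */
#if 0
static void example_global_quiesce(void)
{
    bdrv_drain_all_begin();
    /* ... no new block jobs or BDSes may be created in this window ... */
    bdrv_drain_all_end();
}
#endif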
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
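
/*
 * Worked example (illustrative, not part of the original file): two
 * half-open ranges [a, a + alen) and [b, b + blen) overlap iff each one
 * starts before the other ends, which is exactly the pair of early returns
 * above.  E.g. [0, 512) and [512, 1024) do not overlap, while [0, 513) and
 * [512, 1024) do.
 */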
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
                                      BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    BlockDriverState *bs = req->bs;
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;
    bool waited;

    qemu_co_mutex_lock(&bs->reqs_lock);
    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
    waited = bdrv_wait_serialising_requests_locked(bs, req);
    qemu_co_mutex_unlock(&bs->reqs_lock);
    return waited;
}
/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
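
/*
 * Worked example (illustrative, not part of the original file): with a
 * cluster size of 64 KiB, offset = 70000 and bytes = 1000 yield
 * *cluster_offset = 65536 and *cluster_bytes =
 * QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536, i.e. the region is
 * grown to cover the whole cluster it touches.
 */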
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(bs, self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}
typedef int coroutine_fn BdrvRequestEntry(void *opaque);
typedef struct BdrvRunCo {
    BdrvRequestEntry *entry;
    void *opaque;
    int ret;
    bool done;
    Coroutine *co; /* Coroutine, running bdrv_run_co_entry, for debugging */
} BdrvRunCo;

static void coroutine_fn bdrv_run_co_entry(void *opaque)
{
    BdrvRunCo *arg = opaque;

    arg->ret = arg->entry(arg->opaque);
    atomic_mb_set(&arg->done, true);
    aio_wait_kick();
}

static int bdrv_run_co(BlockDriverState *bs, BdrvRequestEntry *entry,
                       void *opaque)
{
    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        return entry(opaque);
    } else {
        BdrvRunCo s = { .entry = entry, .opaque = opaque };

        s.co = qemu_coroutine_create(bdrv_run_co_entry, &s);
        bdrv_coroutine_enter(bs, s.co);

        BDRV_POLL_WHILE(bs, !s.done);

        return s.ret;
    }
}
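
/*
 * Illustrative sketch, not part of the original file: how a synchronous
 * wrapper funnels coroutine work through bdrv_run_co().  example_entry()
 * and example_sync_flush() are hypothetical; bdrv_rw_co_entry() below is
 * the real in-tree user.
 */
#if 0
static int coroutine_fn example_entry(void *opaque)
{
    BlockDriverState *bs = opaque;
    return bdrv_co_flush(bs);   /* any coroutine_fn work */
}

static int example_sync_flush(BlockDriverState *bs)
{
    /* Runs example_entry() directly if already in coroutine context,
     * otherwise spawns a coroutine and polls until it finishes. */
    return bdrv_run_co(bs, example_entry, bs);
}
#endif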
typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    BdrvRequestFlags flags;
} RwCo;

static int coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        return bdrv_co_preadv(rwco->child, rwco->offset,
                              rwco->qiov->size, rwco->qiov,
                              rwco->flags);
    } else {
        return bdrv_co_pwritev(rwco->child, rwco->offset,
                               rwco->qiov->size, rwco->qiov,
                               rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .flags = flags,
    };

    return bdrv_run_co(child->bs, bdrv_rw_co_entry, &rwco);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
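
/*
 * Illustrative sketch, not part of the original file: zeroing out an entire
 * device while letting the driver punch holes where it can.  example_wipe()
 * is a hypothetical caller.
 */
#if 0
static int example_wipe(BdrvChild *child)
{
    return bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
}
#endif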
/* return < 0 if error. See bdrv_pwrite() for the return codes */
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}
/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_pwritev(child, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
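
/*
 * Illustrative sketch, not part of the original file: the byte-based
 * synchronous helpers compose as shown.  example_update_header(), the
 * buffer and sizes are hypothetical.
 */
#if 0
static int example_update_header(BdrvChild *child)
{
    uint8_t buf[512] = { 0 };
    int ret;

    ret = bdrv_pread(child, 0, buf, sizeof(buf));  /* read old header */
    if (ret < 0) {
        return ret;
    }
    buf[0] = 0xff;                                 /* modify it */
    /* write it back and flush, so the update acts as a barrier */
    return bdrv_pwrite_sync(child, 0, buf, sizeof(buf));
}
#endif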
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;
    bool skip_write;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive.  That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.  Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests.  If this is a deliberate copy-on-read
                 * then we don't want to ignore the error.  Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining, 0);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;
static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    uint64_t align = bs->bl.request_alignment;
    size_t sum;

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;

    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}
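
/*
 * Worked example (illustrative, not part of the original file): with
 * align = 512, offset = 100 and bytes = 1000: head = 100, end = 1100, so
 * tail = 512 - (1100 % 512) = 436 and sum = 1536 > align with both paddings
 * present, hence buf_len = 2 * align = 1024 and merge_reads is false.
 */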
static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
                                 bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        uint64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}
static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
}
/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * All parameters except @bs are in-out: they represent original request at
 * function call and padded (if padding needed) at function finish.
 *
 * Function always succeeds.
 */
static bool bdrv_pad_request(BlockDriverState *bs,
                             QEMUIOVector **qiov, size_t *qiov_offset,
                             int64_t *offset, unsigned int *bytes,
                             BdrvRequestPadding *pad)
{
    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        return false;
    }

    qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                             *qiov, *qiov_offset, *bytes,
                             pad->buf + pad->buf_len - pad->tail, pad->tail);
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;

    return true;
}
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv(bs, offset, bytes, flags);

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length read occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (atomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad);

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);

    return ret;
}
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    void *buf = NULL;
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
        return -ENOTSUP;
    }

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            if (buf == NULL) {
                buf = qemu_try_blockalign0(bs, num);
                if (buf == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
            }
            qemu_iovec_init_buf(&qiov, buf, num);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(buf);
                buf = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(buf);
    return ret;
}
static inline int coroutine_fn
bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
                          BdrvTrackedRequest *req, int flags)
{
    BlockDriverState *bs = child->bs;
    bool waited;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);

    if (bs->read_only) {
        return -EPERM;
    }

    assert(!(bs->open_flags & BDRV_O_INACTIVE));
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));

    if (flags & BDRV_REQ_SERIALISING) {
        waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
        /*
         * For a misaligned request we should have already waited earlier,
         * because we come after bdrv_padding_rmw_read which must be called
         * with the request already marked as serialising.
         */
        assert(!waited ||
               (req->offset == req->overlap_offset &&
                req->bytes == req->overlap_bytes));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    switch (req->type) {
    case BDRV_TRACKED_WRITE:
    case BDRV_TRACKED_DISCARD:
        if (flags & BDRV_REQ_WRITE_UNCHANGED) {
            assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
        } else {
            assert(child->perm & BLK_PERM_WRITE);
        }
        return notifier_with_return_list_notify(&bs->before_write_notifiers,
                                                req);
    case BDRV_TRACKED_TRUNCATE:
        assert(child->perm & BLK_PERM_RESIZE);
        return 0;
    default:
        abort();
    }
}
static inline void coroutine_fn
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
                         BdrvTrackedRequest *req, int ret)
{
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    BlockDriverState *bs = child->bs;

    atomic_inc(&bs->write_gen);

    /*
     * Discard cannot extend the image, but in error handling cases, such as
     * when reverting a qcow2 cluster allocation, the discarded range can pass
     * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
     * here. Instead, just skip it, since semantically a discard request
     * beyond EOF cannot expand the image anyway.
     */
    if (ret == 0 &&
        (req->type == BDRV_TRACKED_TRUNCATE ||
         end_sector > bs->total_sectors) &&
        req->type != BDRV_TRACKED_DISCARD) {
        bs->total_sectors = end_sector;
        bdrv_parent_cb_resize(bs);
        bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
    }
    if (ret == 0) {
        switch (req->type) {
        case BDRV_TRACKED_WRITE:
            stat64_max(&bs->wr_highest_offset, offset + bytes);
            /* fall through, to set dirty bits */
        case BDRV_TRACKED_DISCARD:
            bdrv_set_dirty(bs, offset, bytes);
            break;
        default:
            break;
        }
    }
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;

    uint64_t bytes_remaining = bytes;
    int max_transfer;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || qiov_offset + bytes <= qiov->size);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
                                             qiov, qiov_offset);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, qiov,
                                      qiov_offset + bytes - bytes_remaining,
                                      local_flags);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret >= 0) {
        ret = 0;
    }
    bdrv_co_write_req_finish(child, offset, bytes, req, ret);

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    QEMUIOVector local_qiov;
    uint64_t align = bs->bl.request_alignment;
    int ret = 0;
    bool padding;
    BdrvRequestPadding pad;

    padding = bdrv_init_padding(bs, offset, bytes, &pad);
    if (padding) {
        bdrv_mark_request_serialising(req, align);

        bdrv_padding_rmw_read(child, req, &pad, true);

        if (pad.head || pad.merge_reads) {
            int64_t aligned_offset = offset & ~(align - 1);
            int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;

            qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
            ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
                                       align, &local_qiov, 0,
                                       flags & ~BDRV_REQ_ZERO_WRITE);
            if (ret < 0 || pad.merge_reads) {
                /* Error or all work is done */
                goto out;
            }
            offset += write_bytes - pad.head;
            bytes -= write_bytes - pad.head;
        }
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, 0, flags);
        if (ret < 0) {
            goto out;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == pad.tail + bytes);

        qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, 0,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
    }

out:
    bdrv_padding_destroy(&pad);

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* If the request is misaligned then we can't make it efficient */
    if ((flags & BDRV_REQ_NO_FALLBACK) &&
        !QEMU_IS_ALIGNED(offset | bytes, align))
    {
        return -ENOTSUP;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning zero request is nonsense. Even if driver has special meaning
         * of zero-length (like qcow2_co_pwritev_compressed_part), we can't pass
         * it to driver due to request_alignment.
         *
         * Still, no reason to return an error if someone does an unaligned
         * zero-length write occasionally.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
        bdrv_mark_request_serialising(&req, align);
        bdrv_padding_rmw_read(child, &req, &pad, false);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               qiov, qiov_offset, flags);

    bdrv_padding_destroy(&pad);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
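
/*
 * Illustrative sketch, not part of the original file: a coroutine caller
 * zeroing a 1 MiB region, letting the blocks be unmapped when the image was
 * opened with BDRV_O_UNMAP (the flag is masked off above otherwise).
 * example_zero_region() is a hypothetical helper.
 */
#if 0
static int coroutine_fn example_zero_region(BdrvChild *child)
{
    return bdrv_co_pwrite_zeroes(child, 0, 1048576, BDRV_REQ_MAY_UNMAP);
}
#endif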
/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    /*
     * bdrv queue is managed by record/replay,
     * creating new flush request for stopping
     * the VM may break the determinism
     */
    if (replay_events_enabled()) {
        return result;
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
typedef struct BdrvCoBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    bool want_zero;
    int64_t offset;
    int64_t bytes;
    int64_t *pnum;
    int64_t *map;
    BlockDriverState **file;
} BdrvCoBlockStatusData;

int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
                                                bool want_zero,
                                                int64_t offset,
                                                int64_t bytes,
                                                int64_t *pnum,
                                                int64_t *map,
                                                BlockDriverState **file)
{
    assert(bs->file && bs->file->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->file->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}

int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    assert(bs->backing && bs->backing->bs);
    *pnum = bytes;
    *map = offset;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping
 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
 * _ZERO where possible; otherwise, the result favors larger 'pnum',
 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int64_t aligned_offset, aligned_bytes;
    uint32_t align;

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    /* Must be non-NULL or bdrv_getlength() would have failed */
    assert(bs->drv);
    if (!bs->drv->bdrv_co_block_status) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);

    /* Round out to request_alignment boundaries */
    align = bs->bl.request_alignment;
    aligned_offset = QEMU_ALIGN_DOWN(offset, align);
    aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;

    ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
                                        aligned_bytes, pnum, &local_map,
                                        &local_file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    /*
     * The driver's result must be a non-zero multiple of request_alignment.
     * Clamp pnum and adjust map to original request.
     */
    assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
           align > offset - aligned_offset);
    if (ret & BDRV_BLOCK_RECURSE) {
        assert(ret & BDRV_BLOCK_DATA);
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        assert(!(ret & BDRV_BLOCK_ZERO));
    }

    *pnum -= offset - aligned_offset;
    if (*pnum > bytes) {
        *pnum = bytes;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map += offset - aligned_offset;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (want_zero && bs->drv->supports_backing) {
        if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t size2 = bdrv_getlength(bs2);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        } else {
            ret |= BDRV_BLOCK_ZERO;
        }
    }

    if (want_zero && ret & BDRV_BLOCK_RECURSE &&
        local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /*
             * Ignore errors.  This is just providing extra information;
             * it is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    return ret;
}
static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                                   BlockDriverState *base,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    BlockDriverState *p;
    int ret = 0;
    bool first = true;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        if (ret < 0) {
            break;
        }
        if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
            /*
             * Reading beyond the end of the file continues to read
             * zeroes, but we can only widen the result to the
             * unallocated length we learned from an earlier
             * iteration.
             */
            *pnum = bytes;
        }
        if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
            break;
        }
        /*
         * [offset, pnum] unallocated on this layer, which could be only
         * the first part of [offset, bytes].
         */
        bytes = MIN(bytes, *pnum);
        first = false;
    }
    return ret;
}
/* Coroutine wrapper for bdrv_block_status_above() */
static int coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
{
    BdrvCoBlockStatusData *data = opaque;

    return bdrv_co_block_status_above(data->bs, data->base,
                                      data->want_zero,
                                      data->offset, data->bytes,
                                      data->pnum, data->map, data->file);
}
/*
 * Synchronous wrapper around bdrv_co_block_status_above().
 *
 * See bdrv_co_block_status_above() for details.
 */
static int bdrv_common_block_status_above(BlockDriverState *bs,
                                          BlockDriverState *base,
                                          bool want_zero, int64_t offset,
                                          int64_t bytes, int64_t *pnum,
                                          int64_t *map,
                                          BlockDriverState **file)
{
    BdrvCoBlockStatusData data = {
        .bs = bs,
        .base = base,
        .want_zero = want_zero,
        .offset = offset,
        .bytes = bytes,
        .pnum = pnum,
        .map = map,
        .file = file,
    };

    return bdrv_run_co(bs, bdrv_block_status_above_co_entry, &data);
}
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, true, offset, bytes,
                                          pnum, map, file);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, backing_bs(bs),
                                   offset, bytes, pnum, map, file);
}
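
/*
 * Example (hypothetical caller): walk a whole image with bdrv_block_status().
 * Per the contract documented above, each iteration advances by *pnum, and
 * *pnum is only 0 at end-of-file.
 *
 *     int64_t offset = 0, bytes = bdrv_getlength(bs);
 *     while (bytes > 0) {
 *         int64_t pnum, map;
 *         BlockDriverState *file;
 *         int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         ... inspect ret for BDRV_BLOCK_DATA / _ZERO / _ALLOCATED ...
 *         offset += pnum;
 *         bytes -= pnum;
 *     }
 */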
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return 1 if (a prefix of) the given range is allocated in any image
 * between BASE and TOP (BASE is only included if include_base is set).
 * BASE can be NULL to check if the given offset is allocated in any
 * image of the chain.  Return 0 otherwise, or negative errno on
 * failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            bool include_base, int64_t offset,
                            int64_t bytes, int64_t *pnum)
{
    BlockDriverState *intermediate;
    int ret;
    int64_t n = bytes;

    assert(base || !include_base);

    intermediate = top;
    while (include_base || intermediate != base) {
        int64_t pnum_inter;
        int64_t size_inter;

        assert(intermediate);
        ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        size_inter = bdrv_getlength(intermediate);
        if (size_inter < 0) {
            return size_inter;
        }
        if (n > pnum_inter &&
            (intermediate == top || offset + pnum_inter < size_inter)) {
            n = pnum_inter;
        }

        if (intermediate == base) {
            break;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
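
/*
 * Example (hypothetical caller): decide whether a commit or stream operation
 * may skip a range because no image above @base allocates it.
 *
 *     ret = bdrv_is_allocated_above(top, base, false, offset, bytes, &pnum);
 *     if (ret == 0) {
 *         the first pnum bytes at offset come from base (or read as zeroes
 *         when base is NULL), so there is nothing to copy for them
 *     }
 */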
typedef struct BdrvVmstateCo {
    BlockDriverState *bs;
    QEMUIOVector *qiov;
    int64_t pos;
    bool is_read;
} BdrvVmstateCo;
static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;
    int ret = -ENOTSUP;

    bdrv_inc_in_flight(bs);

    if (!drv) {
        ret = -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        if (is_read) {
            ret = drv->bdrv_load_vmstate(bs, qiov, pos);
        } else {
            ret = drv->bdrv_save_vmstate(bs, qiov, pos);
        }
    } else if (bs->file) {
        ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    bdrv_dec_in_flight(bs);
    return ret;
}
static int coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;

    return bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}
static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    BdrvVmstateCo data = {
        .bs      = bs,
        .qiov    = qiov,
        .pos     = pos,
        .is_read = is_read,
    };

    return bdrv_run_co(bs, bdrv_co_rw_vmstate_entry, &data);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
    int ret;

    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}
int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
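
/*
 * Example (hypothetical caller): round-trip a migration state blob through
 * the vmstate area of a node whose format supports it (e.g. qcow2).  Both
 * wrappers return @size on success and a negative errno on failure.
 *
 *     if (bdrv_save_vmstate(bs, buf, 0, size) < 0 ||
 *         bdrv_load_vmstate(bs, buf, 0, size) < 0) {
 *         ... handle error ...
 *     }
 */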
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /*
             * qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}
/*
 * Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called.
 */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
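
/*
 * Example (hypothetical caller): cancel an in-flight request.  The
 * synchronous variant may only be used from the main loop, as asserted
 * above; code running in an I/O thread must use the async variant and wait
 * for the completion callback instead.
 *
 *     bdrv_aio_cancel_async(acb);    does not block
 *     bdrv_aio_cancel(acb);          blocks until the request completes
 */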
/**************************************************************/
/* Coroutine block device emulation */

static int coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    return bdrv_co_flush(opaque);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = atomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (!bs->drv) {
        /*
         * bs->drv->bdrv_co_flush() might have ejected the BDS
         * (even in case of apparent success)
         */
        ret = -ENOMEDIUM;
        goto out;
    }
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /*
     * Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_flush(BlockDriverState *bs)
{
    return bdrv_run_co(bs, bdrv_flush_co_entry, bs);
}
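
/*
 * Example (hypothetical caller): make previously completed writes durable.
 * With cache=unsafe (BDRV_O_NO_FLUSH) this stops at the host page cache, as
 * implemented in bdrv_co_flush() above.
 *
 *     ret = bdrv_flush(bs);
 *     if (ret < 0) {
 *         error_report("flush failed: %s", strerror(-ret));
 *     }
 */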
typedef struct DiscardCo {
    BdrvChild *child;
    int64_t offset;
    int64_t bytes;
} DiscardCo;

static int coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    return bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
}
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;
    BlockDriverState *bs = child->bs;

    if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
        return -EIO;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /*
     * Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.
     */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }

        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }
        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
{
    DiscardCo rwco = {
        .child = child,
        .offset = offset,
        .bytes = bytes,
    };

    return bdrv_run_co(child->bs, bdrv_pdiscard_co_entry, &rwco);
}
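
/*
 * Example (hypothetical caller): advise that a range is no longer needed.
 * Discard is best-effort: a return of 0 does not guarantee that anything was
 * actually unmapped (the node may lack BDRV_O_UNMAP or driver support).
 *
 *     ret = bdrv_pdiscard(child, offset, bytes);
 *     if (ret < 0) {
 *         error_report("discard failed: %s", strerror(-ret));
 *     }
 */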
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
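
/*
 * Example (hypothetical caller): allocate a bounce buffer suitable for
 * O_DIRECT-style I/O on @bs, preferring the _try_ variant so that a large
 * allocation failure can be reported instead of aborting.
 *
 *     buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */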
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}
void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}
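
/*
 * Example (hypothetical caller): batch several requests into one driver
 * submission.  Plug/unplug calls nest; only the outermost pair reaches the
 * driver, as implemented with the io_plugged counter above.
 *
 *     bdrv_io_plug(bs);
 *     ... submit multiple aio requests ...
 *     bdrv_io_unplug(bs);
 */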
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_register_buf) {
        bs->drv->bdrv_register_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_register_buf(child->bs, host, size);
    }
}
void bdrv_unregister_buf(BlockDriverState *bs, void *host)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host);
    }
}
static int coroutine_fn bdrv_co_copy_range_internal(
        BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
        uint64_t dst_offset, uint64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));

    if (!dst || !dst->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}
/*
 * Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics.
 */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         uint64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}
/*
 * Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics.
 */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       uint64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}
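
/*
 * Example (hypothetical caller, coroutine context): try an offloaded copy
 * and fall back to a bounce buffer when the drivers cannot handle it.
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset, bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         ... read into a bounce buffer, then write it out to dst ...
 *     }
 */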
static void bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}
/*
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    old_size = bdrv_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /*
     * If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation.
     */
    if (new_bytes) {
        bdrv_mark_request_serialising(&req, 1);
    }
    if (bs->read_only) {
        error_setg(errp, "Image is read-only");
        ret = -EACCES;
        goto out;
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && bs->backing) {
        int64_t backing_len;

        backing_len = bdrv_getlength(backing_bs(bs));
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (bs->file && drv->is_filter) {
        ret = bdrv_co_truncate(bs->file, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but refresh_total_sectors
     * failed, but the latter doesn't affect how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
typedef struct TruncateCo {
    BdrvChild *child;
    int64_t offset;
    bool exact;
    PreallocMode prealloc;
    BdrvRequestFlags flags;
    Error **errp;
} TruncateCo;

static int coroutine_fn bdrv_truncate_co_entry(void *opaque)
{
    TruncateCo *tco = opaque;

    return bdrv_co_truncate(tco->child, tco->offset, tco->exact,
                            tco->prealloc, tco->flags, tco->errp);
}
int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
                  PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
{
    TruncateCo tco = {
        .child    = child,
        .offset   = offset,
        .exact    = exact,
        .prealloc = prealloc,
        .flags    = flags,
        .errp     = errp,
    };

    return bdrv_run_co(child->bs, bdrv_truncate_co_entry, &tco);
}
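
/*
 * Example (hypothetical caller): grow an image, letting the block layer
 * zero-fill the new tail when a larger backing file would otherwise shine
 * through, as implemented in bdrv_co_truncate() above.
 *
 *     Error *local_err = NULL;
 *     ret = bdrv_truncate(child, new_size, false, PREALLOC_MODE_OFF, 0,
 *                         &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */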