/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);
void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}
void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
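/*
 * Illustrative sketch (added here for clarity, not part of the original
 * file): because the flag is a reference count, each user simply pairs an
 * enable with a disable and never has to save or restore previous state.
 * The surrounding job code below is hypothetical and only shows the pairing:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... issue guest reads; completed reads are copied into bs ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * Copy-on-read stays active until every enabler has called the disable side.
 */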
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
} BdrvCoDrainData;
static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done before reading bs->wakeup.  */
    atomic_mb_set(&data->done, true);
    bdrv_wakeup(bs);
}
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
{
    BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin };

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data);
    bdrv_coroutine_enter(bs, data.co);
    BDRV_POLL_WHILE(bs, !data.done);
}
static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
{
    BdrvChild *child, *tmp;
    bool waited;

    /* Ensure any pending metadata writes are submitted to bs->file.  */
    bdrv_drain_invoke(bs, begin);

    /* Wait for drained requests to finish */
    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
        BlockDriverState *bs = child->bs;
        bool in_main_loop =
            qemu_get_current_aio_context() == qemu_get_aio_context();
        assert(bs->refcnt > 0);
        if (in_main_loop) {
            /* In case the recursive bdrv_drain_recurse processes a
             * block_job_defer_to_main_loop BH and modifies the graph,
             * let's hold a reference to bs until we are done.
             *
             * IOThread doesn't have such a BH, and it is not safe to call
             * bdrv_unref without BQL, so skip doing it there.
             */
            bdrv_ref(bs);
        }
        waited |= bdrv_drain_recurse(bs, begin);
        if (in_main_loop) {
            bdrv_unref(bs);
        }
    }

    return waited;
}
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    bdrv_dec_in_flight(bs);
    if (data->begin) {
        bdrv_drained_begin(bs);
    } else {
        bdrv_drained_end(bs);
    }

    data->done = true;
    aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true);
        return;
    }

    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_drain_recurse(bs, true);
}
void bdrv_drained_end(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false);
        return;
    }
    assert(bs->quiesce_counter > 0);
    if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
        return;
    }

    bdrv_parent_drained_end(bs);
    bdrv_drain_recurse(bs, false);
    aio_enable_external(bdrv_get_aio_context(bs));
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
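/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * callers typically open a drained section so that no new requests are
 * submitted while the node or graph is being manipulated.  The helper named
 * below is hypothetical and only marks where the caller's work would go:
 *
 *     bdrv_drained_begin(bs);
 *     modify_graph_somehow(bs);   // no new I/O can reach bs in here
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain(bs) is the degenerate drained section with nothing in between,
 * useful when the caller only needs to wait for in-flight requests.
 */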
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool waited = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    block_job_pause_all();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        aio_disable_external(aio_context);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (waited) {
        waited = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs, true);
                }
            }
            aio_context_release(aio_context);
        }
    }

    g_slist_free(aio_ctxs);
}
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs;
    BdrvNextIterator it;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        aio_enable_external(aio_context);
        bdrv_parent_drained_end(bs);
        bdrv_drain_recurse(bs, false);
        aio_context_release(aio_context);
    }

    block_job_resume_all();
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs             = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}
static void dummy_bh_cb(void *opaque)
{
}

void bdrv_wakeup(BlockDriverState *bs)
{
    /* The barrier (or an atomic op) is in the caller.  */
    if (atomic_read(&bs->wakeup)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;
static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = bytes,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            error_report("error getting block status at offset %" PRId64 ": %s",
                         offset, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            error_report("error writing zeroes at offset %" PRId64 ": %s",
                         offset, strerror(-ret));
            return ret;
        }
        offset += bytes;
    }
}
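/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a caller that wants the result to stay sparse would typically pass
 * BDRV_REQ_MAY_UNMAP so drivers may discard ranges instead of writing
 * literal zeroes:
 *
 *     int ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */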
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
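/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the synchronous byte helpers above are typically combined for small
 * metadata updates, e.g. a read-modify-write of a 512-byte header followed
 * by a flush barrier.  The field update below is hypothetical:
 *
 *     uint8_t header[512];
 *
 *     if (bdrv_pread(child, 0, header, sizeof(header)) < 0) {
 *         return -EIO;
 *     }
 *     header[0] = 0x42;    // hypothetical header field update
 *     return bdrv_pwrite_sync(child, 0, header, sizeof(header));
 */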
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector local_qiov;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    bounce_buffer = qemu_try_blockalign(bs,
                                        MIN(MIN(max_transfer, cluster_bytes),
                                            MAX_BOUNCE_BUFFER));
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    while (cluster_bytes) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, cluster_offset,
                                MIN(cluster_bytes, max_transfer), &pnum);
        if (ret < 0) {
            /* Safe to treat errors in querying allocation as if
             * unallocated; we'll probably fail again soon on the
             * read, but at least that will set a decent errno.
             */
            pnum = MIN(cluster_bytes, max_transfer);
        }

        assert(skip_bytes < pnum);

        if (ret <= 0) {
            /* Must copy-on-read; use the bounce buffer */
            iov.iov_base = bounce_buffer;
            iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            qemu_iovec_init_external(&local_qiov, &iov, 1);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 0);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests.  If this is a deliberate copy-on-read
                 * then we don't want to ignore the error.  Simply
                 * report it in all cases.
                 */
                goto err;
            }

            qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
                                pnum - skip_bytes);
        } else {
            /* Read directly into the destination */
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
            ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
                                     &local_qiov, 0);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* TODO: Simplify further once bdrv_is_allocated no longer
         * requires sector alignment */
        int64_t start = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE);
        int64_t end = QEMU_ALIGN_UP(offset + bytes, BDRV_SECTOR_SIZE);
        int64_t pnum;

        ret = bdrv_is_allocated(bs, start, end - start, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != end - start) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    trace_bdrv_co_preadv(child->bs, offset, bytes, flags);

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector.  */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}
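/*
 * Worked example (added for illustration, not part of the original file):
 * with bs->bl.pwrite_zeroes_alignment = 4096 and a request of
 * offset = 512, bytes = 8192, the loop above issues three pieces:
 *   1. head:   512..4095   (3584 bytes, unaligned, may fall back to writes)
 *   2. middle: 4096..8191  (4096 bytes, aligned write_zeroes)
 *   3. tail:   8192..8703  (512 bytes, unaligned remainder)
 * so only the aligned middle piece is guaranteed to take the efficient
 * driver-level zeroing path.
 */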
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(child->perm & BLK_PERM_WRITE);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    atomic_inc(&bs->write_gen);
    bdrv_set_dirty(bs, offset, bytes);

    stat64_max(&bs->wr_highest_offset, offset + bytes);

    if (ret == 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = (align - (offset + bytes)) & (align - 1);

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}
bdrv_co_do_writev(BdrvChild
*child
,
1713 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1714 BdrvRequestFlags flags
)
1716 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
1720 return bdrv_co_pwritev(child
, sector_num
<< BDRV_SECTOR_BITS
,
1721 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
1724 int coroutine_fn
bdrv_co_writev(BdrvChild
*child
, int64_t sector_num
,
1725 int nb_sectors
, QEMUIOVector
*qiov
)
1727 return bdrv_co_do_writev(child
, sector_num
, nb_sectors
, qiov
, 0);
1730 int coroutine_fn
bdrv_co_pwrite_zeroes(BdrvChild
*child
, int64_t offset
,
1731 int bytes
, BdrvRequestFlags flags
)
1733 trace_bdrv_co_pwrite_zeroes(child
->bs
, offset
, bytes
, flags
);
1735 if (!(child
->bs
->open_flags
& BDRV_O_UNMAP
)) {
1736 flags
&= ~BDRV_REQ_MAY_UNMAP
;
1739 return bdrv_co_pwritev(child
, offset
, bytes
, NULL
,
1740 BDRV_REQ_ZERO_WRITE
| flags
);
/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
typedef struct BdrvCoBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    bool want_zero;
    int64_t offset;
    int64_t bytes;
    int64_t *pnum;
    int64_t *map;
    BlockDriverState **file;
    int ret;
    bool done;
} BdrvCoBlockStatusData;
int64_t coroutine_fn bdrv_co_get_block_status_from_file(BlockDriverState *bs,
                                                        int64_t sector_num,
                                                        int nb_sectors,
                                                        int *pnum,
                                                        BlockDriverState **file)
{
    assert(bs->file && bs->file->bs);
    *pnum = nb_sectors;
    *file = bs->file->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
           (sector_num << BDRV_SECTOR_BITS);
}

int64_t coroutine_fn bdrv_co_get_block_status_from_backing(BlockDriverState *bs,
                                                           int64_t sector_num,
                                                           int nb_sectors,
                                                           int *pnum,
                                                           BlockDriverState **file)
{
    assert(bs->backing && bs->backing->bs);
    *pnum = nb_sectors;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
           (sector_num << BDRV_SECTOR_BITS);
}
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'want_zero' is true, the caller is querying for mapping purposes,
 * and the result should include BDRV_BLOCK_OFFSET_VALID and
 * BDRV_BLOCK_ZERO where possible; otherwise, the result may omit those
 * bits particularly if it allows for a larger value in 'pnum'.
 *
 * If 'offset' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are easily known to be in the
 * same allocated/unallocated state.  Note that a second call starting
 * at the original offset plus returned pnum may have the same status.
 * The returned value is non-zero on success except at end-of-file.
 *
 * Returns negative errno on failure.  Otherwise, if the
 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
 * set to the host mapping and BDS corresponding to the guest offset.
 */
static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
                                             bool want_zero,
                                             int64_t offset, int64_t bytes,
                                             int64_t *pnum, int64_t *map,
                                             BlockDriverState **file)
{
    int64_t total_size;
    int64_t n; /* bytes */
    int ret;
    int64_t local_map = 0;
    BlockDriverState *local_file = NULL;
    int count; /* sectors */

    assert(pnum);
    *pnum = 0;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        goto early_out;
    }

    if (offset >= total_size) {
        ret = BDRV_BLOCK_EOF;
        goto early_out;
    }
    if (!bytes) {
        ret = 0;
        goto early_out;
    }

    n = total_size - offset;
    if (n < bytes) {
        bytes = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = bytes;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (offset + bytes == total_size) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID;
            local_map = offset;
            local_file = bs;
        }
        goto early_out;
    }

    bdrv_inc_in_flight(bs);
    /*
     * TODO: Rather than require aligned offsets, we could instead
     * round to the driver's request_alignment here, then touch up
     * count afterwards back to the caller's expectations.
     */
    assert(QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE));
    /*
     * The contract allows us to return pnum smaller than bytes, even
     * if the next query would see the same status; we truncate the
     * request to avoid overflowing the driver's 32-bit interface.
     */
    bytes = MIN(bytes, BDRV_REQUEST_MAX_BYTES);
    ret = bs->drv->bdrv_co_get_block_status(bs, offset >> BDRV_SECTOR_BITS,
                                            bytes >> BDRV_SECTOR_BITS, &count,
                                            &local_file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }
    if (ret & BDRV_BLOCK_OFFSET_VALID) {
        local_map = ret & BDRV_BLOCK_OFFSET_MASK;
    }
    *pnum = count * BDRV_SECTOR_SIZE;

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
        ret = bdrv_co_block_status(local_file, want_zero, local_map,
                                   *pnum, pnum, &local_map, &local_file);
        assert(ret < 0 ||
               QEMU_IS_ALIGNED(*pnum | local_map, BDRV_SECTOR_SIZE));
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else if (want_zero) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t size2 = bdrv_getlength(bs2);

            if (size2 >= 0 && offset >= size2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (want_zero && local_file && local_file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int64_t file_pnum;
        int ret2;

        ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
                                    *pnum, &file_pnum, NULL, NULL);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && offset + *pnum == total_size) {
        ret |= BDRV_BLOCK_EOF;
    }
early_out:
    if (file) {
        *file = local_file;
    }
    if (map) {
        *map = local_map;
    }
    if (ret >= 0) {
        ret &= ~BDRV_BLOCK_OFFSET_MASK;
    } else {
        assert(INT_MIN <= ret);
    }
    return ret;
}
static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
                                                   BlockDriverState *base,
                                                   bool want_zero,
                                                   int64_t offset,
                                                   int64_t bytes,
                                                   int64_t *pnum,
                                                   int64_t *map,
                                                   BlockDriverState **file)
{
    BlockDriverState *p;
    int ret = 0;
    bool first = true;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
                                   file);
        if (ret < 0) {
            break;
        }
        if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
            /*
             * Reading beyond the end of the file continues to read
             * zeroes, but we can only widen the result to the
             * unallocated length we learned from an earlier
             * iteration.
             */
            *pnum = bytes;
        }
        if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
            break;
        }
        /* [offset, pnum] unallocated on this layer, which could be only
         * the first part of [offset, bytes].  */
        bytes = MIN(bytes, *pnum);
        first = false;
    }

    return ret;
}
/* Coroutine wrapper for bdrv_block_status_above() */
static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
{
    BdrvCoBlockStatusData *data = opaque;

    data->ret = bdrv_co_block_status_above(data->bs, data->base,
                                           data->want_zero,
                                           data->offset, data->bytes,
                                           data->pnum, data->map, data->file);
    data->done = true;
}
/*
 * Synchronous wrapper around bdrv_co_block_status_above().
 *
 * See bdrv_co_block_status_above() for details.
 */
static int bdrv_common_block_status_above(BlockDriverState *bs,
                                          BlockDriverState *base,
                                          bool want_zero, int64_t offset,
                                          int64_t bytes, int64_t *pnum,
                                          int64_t *map,
                                          BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoBlockStatusData data = {
        .bs = bs,
        .base = base,
        .want_zero = want_zero,
        .offset = offset,
        .bytes = bytes,
        .pnum = pnum,
        .map = map,
        .file = file,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, !data.done);
    }

    return data.ret;
}
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
{
    return bdrv_common_block_status_above(bs, base, true, offset, bytes,
                                          pnum, map, file);
}

int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    return bdrv_block_status_above(bs, backing_bs(bs),
                                   offset, bytes, pnum, map, file);
}
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int ret;
    int64_t dummy;

    ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
                                         bytes, pnum ? pnum : &dummy, NULL,
                                         NULL);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
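/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a typical caller walks an image with bdrv_block_status(), advancing by
 * the returned pnum, to build an allocation/zero map:
 *
 *     int64_t offset = 0, pnum;
 *     int64_t total = bdrv_getlength(bs);
 *
 *     while (total > 0 && offset < total) {
 *         int ret = bdrv_block_status(bs, offset, total - offset,
 *                                     &pnum, NULL, NULL);
 *         if (ret < 0) {
 *             break;
 *         }
 *         // ret & BDRV_BLOCK_ZERO / BDRV_BLOCK_DATA describe [offset, pnum)
 *         offset += pnum;
 *     }
 */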
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if (a prefix of) the given range is allocated in any image
 * between BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * offset is allocated in any image of the chain.  Return false otherwise,
 * or negative errno on failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum)
{
    BlockDriverState *intermediate;
    int ret;
    int64_t n = bytes;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int64_t pnum_inter;
        int64_t size_inter;

        ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        size_inter = bdrv_getlength(intermediate);
        if (size_inter < 0) {
            return size_inter;
        }
        if (n > pnum_inter &&
            (intermediate == top || offset + pnum_inter < size_inter)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
typedef struct BdrvVmstateCo {
    BlockDriverState    *bs;
    QEMUIOVector        *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;
    int ret = -ENOTSUP;

    bdrv_inc_in_flight(bs);

    if (!drv) {
        ret = -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        if (is_read) {
            ret = drv->bdrv_load_vmstate(bs, qiov, pos);
        } else {
            ret = drv->bdrv_save_vmstate(bs, qiov, pos);
        }
    } else if (bs->file) {
        ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    bdrv_dec_in_flight(bs);
    return ret;
}
static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
        return data.ret;
    }
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = atomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}
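
/*
 * Illustrative sketch (not from the original source): the synchronous
 * wrappers in this file all follow the same pattern -- run the coroutine_fn
 * directly when already in coroutine context, otherwise spawn a coroutine
 * and poll until it signals completion.  A caller outside coroutine context
 * simply does:
 *
 *     int ret = bdrv_flush(bs);
 *     if (ret < 0) {
 *         error_report("flush of %s failed: %s",
 *                      bdrv_get_device_or_node_name(bs), strerror(-ret));
 *     }
 *
 * bdrv_get_device_or_node_name() is used here only to make the example
 * message concrete.
 */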
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int bytes;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
}
int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    atomic_inc(&bs->write_gen);
    bdrv_set_dirty(bs, req.offset, req.bytes);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}
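
/*
 * Worked example (illustrative, values hypothetical): with
 * pdiscard_alignment = 64 KiB and request_alignment = 512, a discard of
 * bytes = 192 KiB at offset = 100 KiB gives align = 64 KiB,
 * head = 100 KiB % 64 KiB = 36 KiB and tail = (100 + 192) KiB % 64 KiB
 * = 36 KiB.  The fragmentation loop in bdrv_co_pdiscard() then issues a
 * 28 KiB request to reach the 128 KiB boundary, an aligned request (capped
 * at max_pdiscard) for the middle, and a final 36 KiB request for the
 * unaligned tail, relying on drivers to ignore what they cannot discard
 * (-ENOTSUP).
 */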
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
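
/*
 * Illustrative sketch (not part of the original file): a typical user of the
 * try-variants allocates a bounce buffer sized for the request, checks for
 * NULL, and releases it with qemu_vfree().  The variable name is
 * hypothetical.
 *
 *     void *bounce = qemu_try_blockalign(bs, bytes);
 *     if (bounce == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(bounce);
 */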
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
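
/*
 * Illustrative sketch (assumption): callers that must hand buffers straight
 * to an O_DIRECT-style backend typically test the vector first and fall back
 * to a bounce buffer when it is not sufficiently aligned.
 *
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         ... copy into a qemu_try_blockalign() buffer before submitting ...
 *     }
 */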
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}
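
/*
 * Illustrative sketch (not from the original source): plug/unplug calls are
 * counted, so a submitter batches requests like this -- anything queued by
 * the driver while plugged is only kicked to the host once the outermost
 * unplug brings the counter back to zero.
 *
 *     bdrv_io_plug(bs);
 *     ... submit several requests ...
 *     bdrv_io_unplug(bs);
 */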
void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs