/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);
void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}
void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/* The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
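
/* Illustrative pairing (a sketch, not code from this file): two independent
 * users can overlap without clobbering each other's state:
 *
 *     bdrv_enable_copy_on_read(bs);     // user A
 *     bdrv_enable_copy_on_read(bs);     // user B
 *     bdrv_disable_copy_on_read(bs);    // user A done, COR stays enabled
 *     bdrv_disable_copy_on_read(bs);    // user B done, COR now disabled
 */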
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done before reading bs->wakeup. */
    atomic_mb_set(&data->done, true);
    bdrv_wakeup(bs);
}
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
{
    BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin };

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data);
    bdrv_coroutine_enter(bs, data.co);
    BDRV_POLL_WHILE(bs, !data.done);
}
static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
{
    BdrvChild *child, *tmp;
    bool waited;

    /* Ensure any pending metadata writes are submitted to bs->file. */
    bdrv_drain_invoke(bs, begin);

    /* Wait for drained requests to finish */
    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
        BlockDriverState *bs = child->bs;
        bool in_main_loop =
            qemu_get_current_aio_context() == qemu_get_aio_context();
        assert(bs->refcnt > 0);
        if (in_main_loop) {
            /* In case the recursive bdrv_drain_recurse processes a
             * block_job_defer_to_main_loop BH and modifies the graph,
             * let's hold a reference to bs until we are done.
             *
             * IOThread doesn't have such a BH, and it is not safe to call
             * bdrv_unref without BQL, so skip doing it there.
             */
            bdrv_ref(bs);
        }
        waited |= bdrv_drain_recurse(bs, begin);
        if (in_main_loop) {
            bdrv_unref(bs);
        }
    }

    return waited;
}
static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    bdrv_dec_in_flight(bs);
    if (data->begin) {
        bdrv_drained_begin(bs);
    } else {
        bdrv_drained_end(bs);
    }

    data->done = true;
    aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true);
        return;
    }

    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_drain_recurse(bs, true);
}
void bdrv_drained_end(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false);
        return;
    }

    assert(bs->quiesce_counter > 0);
    if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
        return;
    }

    bdrv_parent_drained_end(bs);
    bdrv_drain_recurse(bs, false);
    aio_enable_external(bdrv_get_aio_context(bs));
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts. In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
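
/* Illustrative pairing (a sketch, not code from this file): callers that need
 * a quiescent subtree for longer than a single call bracket their work with
 * the begin/end pair; bdrv_drain() above is simply the pair back to back:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the graph or other state that must not race with I/O ...
 *     bdrv_drained_end(bs);
 */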
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool waited = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    block_job_pause_all();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        aio_disable_external(aio_context);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion. Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (waited) {
        waited = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs, true);
                }
            }
            aio_context_release(aio_context);
        }
    }

    g_slist_free(aio_ctxs);
}
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs;
    BdrvNextIterator it;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        aio_enable_external(aio_context);
        bdrv_parent_drained_end(bs);
        bdrv_drain_recurse(bs, false);
        aio_context_release(aio_context);
    }

    block_job_resume_all();
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
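
/* Illustrative pairing (a sketch, not code from this file): bdrv_drain_all()
 * is the degenerate case of the begin/end pair with no work in between:
 *
 *     bdrv_drain_all_begin();
 *     ... act on multiple BlockDriverStates with no new I/O arriving ...
 *     bdrv_drain_all_end();
 */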
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs             = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
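
/* Worked example with illustrative values: for req->offset = 4608,
 * req->bytes = 1024 and align = 4096, overlap_offset is 4096 and
 * overlap_bytes is ROUND_UP(4608 + 1024, 4096) - 4096 = 4096, i.e. the
 * serialising window is widened to cover the whole aligned chunk. */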
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
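
/* Worked example with illustrative values: for a 64 KiB cluster size,
 * offset = 70000 and bytes = 1000 are widened to *cluster_offset = 65536
 * and *cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536. */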
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

static void dummy_bh_cb(void *opaque)
{
}

void bdrv_wakeup(BlockDriverState *bs)
{
    /* The barrier (or an atomic op) is in the caller. */
    if (atomic_read(&bs->wakeup)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}
typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = bytes,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, NULL);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}
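
/* Illustrative caller (a sketch, not code from this file): read a 512-byte
 * header from the start of an image through a BdrvChild:
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         ... handle -errno ...
 *     }
 */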
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
834 typedef struct CoroutineIOCompletion
{
835 Coroutine
*coroutine
;
837 } CoroutineIOCompletion
;
839 static void bdrv_co_io_em_complete(void *opaque
, int ret
)
841 CoroutineIOCompletion
*co
= opaque
;
844 aio_co_wake(co
->coroutine
);
847 static int coroutine_fn
bdrv_driver_preadv(BlockDriverState
*bs
,
848 uint64_t offset
, uint64_t bytes
,
849 QEMUIOVector
*qiov
, int flags
)
851 BlockDriver
*drv
= bs
->drv
;
853 unsigned int nb_sectors
;
855 assert(!(flags
& ~BDRV_REQ_MASK
));
857 if (drv
->bdrv_co_preadv
) {
858 return drv
->bdrv_co_preadv(bs
, offset
, bytes
, qiov
, flags
);
861 sector_num
= offset
>> BDRV_SECTOR_BITS
;
862 nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
864 assert((offset
& (BDRV_SECTOR_SIZE
- 1)) == 0);
865 assert((bytes
& (BDRV_SECTOR_SIZE
- 1)) == 0);
866 assert((bytes
>> BDRV_SECTOR_BITS
) <= BDRV_REQUEST_MAX_SECTORS
);
868 if (drv
->bdrv_co_readv
) {
869 return drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
872 CoroutineIOCompletion co
= {
873 .coroutine
= qemu_coroutine_self(),
876 acb
= bs
->drv
->bdrv_aio_readv(bs
, sector_num
, qiov
, nb_sectors
,
877 bdrv_co_io_em_complete
, &co
);
881 qemu_coroutine_yield();
887 static int coroutine_fn
bdrv_driver_pwritev(BlockDriverState
*bs
,
888 uint64_t offset
, uint64_t bytes
,
889 QEMUIOVector
*qiov
, int flags
)
891 BlockDriver
*drv
= bs
->drv
;
893 unsigned int nb_sectors
;
896 assert(!(flags
& ~BDRV_REQ_MASK
));
898 if (drv
->bdrv_co_pwritev
) {
899 ret
= drv
->bdrv_co_pwritev(bs
, offset
, bytes
, qiov
,
900 flags
& bs
->supported_write_flags
);
901 flags
&= ~bs
->supported_write_flags
;
905 sector_num
= offset
>> BDRV_SECTOR_BITS
;
906 nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
908 assert((offset
& (BDRV_SECTOR_SIZE
- 1)) == 0);
909 assert((bytes
& (BDRV_SECTOR_SIZE
- 1)) == 0);
910 assert((bytes
>> BDRV_SECTOR_BITS
) <= BDRV_REQUEST_MAX_SECTORS
);
912 if (drv
->bdrv_co_writev_flags
) {
913 ret
= drv
->bdrv_co_writev_flags(bs
, sector_num
, nb_sectors
, qiov
,
914 flags
& bs
->supported_write_flags
);
915 flags
&= ~bs
->supported_write_flags
;
916 } else if (drv
->bdrv_co_writev
) {
917 assert(!bs
->supported_write_flags
);
918 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, qiov
);
921 CoroutineIOCompletion co
= {
922 .coroutine
= qemu_coroutine_self(),
925 acb
= bs
->drv
->bdrv_aio_writev(bs
, sector_num
, qiov
, nb_sectors
,
926 bdrv_co_io_em_complete
, &co
);
930 qemu_coroutine_yield();
936 if (ret
== 0 && (flags
& BDRV_REQ_FUA
)) {
937 ret
= bdrv_co_flush(bs
);
943 static int coroutine_fn
944 bdrv_driver_pwritev_compressed(BlockDriverState
*bs
, uint64_t offset
,
945 uint64_t bytes
, QEMUIOVector
*qiov
)
947 BlockDriver
*drv
= bs
->drv
;
949 if (!drv
->bdrv_co_pwritev_compressed
) {
953 return drv
->bdrv_co_pwritev_compressed(bs
, offset
, bytes
, qiov
);
956 static int coroutine_fn
bdrv_co_do_copy_on_readv(BdrvChild
*child
,
957 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
)
959 BlockDriverState
*bs
= child
->bs
;
961 /* Perform I/O through a temporary buffer so that users who scribble over
962 * their read buffer while the operation is in progress do not end up
963 * modifying the image file. This is critical for zero-copy guest I/O
964 * where anything might happen inside guest memory.
968 BlockDriver
*drv
= bs
->drv
;
970 QEMUIOVector local_qiov
;
971 int64_t cluster_offset
;
972 int64_t cluster_bytes
;
975 int max_transfer
= MIN_NON_ZERO(bs
->bl
.max_transfer
,
976 BDRV_REQUEST_MAX_BYTES
);
977 unsigned int progress
= 0;
979 /* FIXME We cannot require callers to have write permissions when all they
980 * are doing is a read request. If we did things right, write permissions
981 * would be obtained anyway, but internally by the copy-on-read code. As
982 * long as it is implemented here rather than in a separate filter driver,
983 * the copy-on-read code doesn't have its own BdrvChild, however, for which
984 * it could request permissions. Therefore we have to bypass the permission
985 * system for the moment. */
986 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
988 /* Cover entire cluster so no additional backing file I/O is required when
989 * allocating cluster in the image file. Note that this value may exceed
990 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
991 * is one reason we loop rather than doing it all at once.
993 bdrv_round_to_clusters(bs
, offset
, bytes
, &cluster_offset
, &cluster_bytes
);
994 skip_bytes
= offset
- cluster_offset
;
996 trace_bdrv_co_do_copy_on_readv(bs
, offset
, bytes
,
997 cluster_offset
, cluster_bytes
);
999 bounce_buffer
= qemu_try_blockalign(bs
,
1000 MIN(MIN(max_transfer
, cluster_bytes
),
1001 MAX_BOUNCE_BUFFER
));
1002 if (bounce_buffer
== NULL
) {
1007 while (cluster_bytes
) {
1010 ret
= bdrv_is_allocated(bs
, cluster_offset
,
1011 MIN(cluster_bytes
, max_transfer
), &pnum
);
1013 /* Safe to treat errors in querying allocation as if
1014 * unallocated; we'll probably fail again soon on the
1015 * read, but at least that will set a decent errno.
1017 pnum
= MIN(cluster_bytes
, max_transfer
);
1020 assert(skip_bytes
< pnum
);
1023 /* Must copy-on-read; use the bounce buffer */
1024 iov
.iov_base
= bounce_buffer
;
1025 iov
.iov_len
= pnum
= MIN(pnum
, MAX_BOUNCE_BUFFER
);
1026 qemu_iovec_init_external(&local_qiov
, &iov
, 1);
1028 ret
= bdrv_driver_preadv(bs
, cluster_offset
, pnum
,
1034 bdrv_debug_event(bs
, BLKDBG_COR_WRITE
);
1035 if (drv
->bdrv_co_pwrite_zeroes
&&
1036 buffer_is_zero(bounce_buffer
, pnum
)) {
1037 /* FIXME: Should we (perhaps conditionally) be setting
1038 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1039 * that still correctly reads as zero? */
1040 ret
= bdrv_co_do_pwrite_zeroes(bs
, cluster_offset
, pnum
, 0);
1042 /* This does not change the data on the disk, it is not
1043 * necessary to flush even in cache=writethrough mode.
1045 ret
= bdrv_driver_pwritev(bs
, cluster_offset
, pnum
,
1050 /* It might be okay to ignore write errors for guest
1051 * requests. If this is a deliberate copy-on-read
1052 * then we don't want to ignore the error. Simply
1053 * report it in all cases.
1058 qemu_iovec_from_buf(qiov
, progress
, bounce_buffer
+ skip_bytes
,
1061 /* Read directly into the destination */
1062 qemu_iovec_init(&local_qiov
, qiov
->niov
);
1063 qemu_iovec_concat(&local_qiov
, qiov
, progress
, pnum
- skip_bytes
);
1064 ret
= bdrv_driver_preadv(bs
, offset
+ progress
, local_qiov
.size
,
1066 qemu_iovec_destroy(&local_qiov
);
1072 cluster_offset
+= pnum
;
1073 cluster_bytes
-= pnum
;
1074 progress
+= pnum
- skip_bytes
;
1080 qemu_vfree(bounce_buffer
);
1085 * Forwards an already correctly aligned request to the BlockDriver. This
1086 * handles copy on read, zeroing after EOF, and fragmentation of large
1087 * reads; any other features must be implemented by the caller.
1089 static int coroutine_fn
bdrv_aligned_preadv(BdrvChild
*child
,
1090 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
1091 int64_t align
, QEMUIOVector
*qiov
, int flags
)
1093 BlockDriverState
*bs
= child
->bs
;
1094 int64_t total_bytes
, max_bytes
;
1096 uint64_t bytes_remaining
= bytes
;
1099 assert(is_power_of_2(align
));
1100 assert((offset
& (align
- 1)) == 0);
1101 assert((bytes
& (align
- 1)) == 0);
1102 assert(!qiov
|| bytes
== qiov
->size
);
1103 assert((bs
->open_flags
& BDRV_O_NO_IO
) == 0);
1104 max_transfer
= QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs
->bl
.max_transfer
, INT_MAX
),
1107 /* TODO: We would need a per-BDS .supported_read_flags and
1108 * potential fallback support, if we ever implement any read flags
1109 * to pass through to drivers. For now, there aren't any
1110 * passthrough flags. */
1111 assert(!(flags
& ~(BDRV_REQ_NO_SERIALISING
| BDRV_REQ_COPY_ON_READ
)));
1113 /* Handle Copy on Read and associated serialisation */
1114 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1115 /* If we touch the same cluster it counts as an overlap. This
1116 * guarantees that allocating writes will be serialized and not race
1117 * with each other for the same cluster. For example, in copy-on-read
1118 * it ensures that the CoR read and write operations are atomic and
1119 * guest writes cannot interleave between them. */
1120 mark_request_serialising(req
, bdrv_get_cluster_size(bs
));
1123 if (!(flags
& BDRV_REQ_NO_SERIALISING
)) {
1124 wait_serialising_requests(req
);
1127 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1128 /* TODO: Simplify further once bdrv_is_allocated no longer
1129 * requires sector alignment */
1130 int64_t start
= QEMU_ALIGN_DOWN(offset
, BDRV_SECTOR_SIZE
);
1131 int64_t end
= QEMU_ALIGN_UP(offset
+ bytes
, BDRV_SECTOR_SIZE
);
1134 ret
= bdrv_is_allocated(bs
, start
, end
- start
, &pnum
);
1139 if (!ret
|| pnum
!= end
- start
) {
1140 ret
= bdrv_co_do_copy_on_readv(child
, offset
, bytes
, qiov
);
1145 /* Forward the request to the BlockDriver, possibly fragmenting it */
1146 total_bytes
= bdrv_getlength(bs
);
1147 if (total_bytes
< 0) {
1152 max_bytes
= ROUND_UP(MAX(0, total_bytes
- offset
), align
);
1153 if (bytes
<= max_bytes
&& bytes
<= max_transfer
) {
1154 ret
= bdrv_driver_preadv(bs
, offset
, bytes
, qiov
, 0);
1158 while (bytes_remaining
) {
1162 QEMUIOVector local_qiov
;
1164 num
= MIN(bytes_remaining
, MIN(max_bytes
, max_transfer
));
1166 qemu_iovec_init(&local_qiov
, qiov
->niov
);
1167 qemu_iovec_concat(&local_qiov
, qiov
, bytes
- bytes_remaining
, num
);
1169 ret
= bdrv_driver_preadv(bs
, offset
+ bytes
- bytes_remaining
,
1170 num
, &local_qiov
, 0);
1172 qemu_iovec_destroy(&local_qiov
);
1174 num
= bytes_remaining
;
1175 ret
= qemu_iovec_memset(qiov
, bytes
- bytes_remaining
, 0,
1181 bytes_remaining
-= num
;
1185 return ret
< 0 ? ret
: 0;
1189 * Handle a read request in coroutine context
1191 int coroutine_fn
bdrv_co_preadv(BdrvChild
*child
,
1192 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
1193 BdrvRequestFlags flags
)
1195 BlockDriverState
*bs
= child
->bs
;
1196 BlockDriver
*drv
= bs
->drv
;
1197 BdrvTrackedRequest req
;
1199 uint64_t align
= bs
->bl
.request_alignment
;
1200 uint8_t *head_buf
= NULL
;
1201 uint8_t *tail_buf
= NULL
;
1202 QEMUIOVector local_qiov
;
1203 bool use_local_qiov
= false;
1206 trace_bdrv_co_preadv(child
->bs
, offset
, bytes
, flags
);
1212 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
1217 bdrv_inc_in_flight(bs
);
1219 /* Don't do copy-on-read if we read data before write operation */
1220 if (atomic_read(&bs
->copy_on_read
) && !(flags
& BDRV_REQ_NO_SERIALISING
)) {
1221 flags
|= BDRV_REQ_COPY_ON_READ
;
1224 /* Align read if necessary by padding qiov */
1225 if (offset
& (align
- 1)) {
1226 head_buf
= qemu_blockalign(bs
, align
);
1227 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 2);
1228 qemu_iovec_add(&local_qiov
, head_buf
, offset
& (align
- 1));
1229 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1230 use_local_qiov
= true;
1232 bytes
+= offset
& (align
- 1);
1233 offset
= offset
& ~(align
- 1);
1236 if ((offset
+ bytes
) & (align
- 1)) {
1237 if (!use_local_qiov
) {
1238 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 1);
1239 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1240 use_local_qiov
= true;
1242 tail_buf
= qemu_blockalign(bs
, align
);
1243 qemu_iovec_add(&local_qiov
, tail_buf
,
1244 align
- ((offset
+ bytes
) & (align
- 1)));
1246 bytes
= ROUND_UP(bytes
, align
);
1249 tracked_request_begin(&req
, bs
, offset
, bytes
, BDRV_TRACKED_READ
);
1250 ret
= bdrv_aligned_preadv(child
, &req
, offset
, bytes
, align
,
1251 use_local_qiov
? &local_qiov
: qiov
,
1253 tracked_request_end(&req
);
1254 bdrv_dec_in_flight(bs
);
1256 if (use_local_qiov
) {
1257 qemu_iovec_destroy(&local_qiov
);
1258 qemu_vfree(head_buf
);
1259 qemu_vfree(tail_buf
);
1265 static int coroutine_fn
bdrv_co_do_readv(BdrvChild
*child
,
1266 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1267 BdrvRequestFlags flags
)
1269 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
1273 return bdrv_co_preadv(child
, sector_num
<< BDRV_SECTOR_BITS
,
1274 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
1277 int coroutine_fn
bdrv_co_readv(BdrvChild
*child
, int64_t sector_num
,
1278 int nb_sectors
, QEMUIOVector
*qiov
)
1280 return bdrv_co_do_readv(child
, sector_num
, nb_sectors
, qiov
, 0);
1283 static int coroutine_fn
bdrv_co_do_pwrite_zeroes(BlockDriverState
*bs
,
1284 int64_t offset
, int bytes
, BdrvRequestFlags flags
)
1286 BlockDriver
*drv
= bs
->drv
;
1288 struct iovec iov
= {0};
1290 bool need_flush
= false;
1294 int max_write_zeroes
= MIN_NON_ZERO(bs
->bl
.max_pwrite_zeroes
, INT_MAX
);
1295 int alignment
= MAX(bs
->bl
.pwrite_zeroes_alignment
,
1296 bs
->bl
.request_alignment
);
1297 int max_transfer
= MIN_NON_ZERO(bs
->bl
.max_transfer
, MAX_BOUNCE_BUFFER
);
1299 assert(alignment
% bs
->bl
.request_alignment
== 0);
1300 head
= offset
% alignment
;
1301 tail
= (offset
+ bytes
) % alignment
;
1302 max_write_zeroes
= QEMU_ALIGN_DOWN(max_write_zeroes
, alignment
);
1303 assert(max_write_zeroes
>= bs
->bl
.request_alignment
);
1305 while (bytes
> 0 && !ret
) {
1308 /* Align request. Block drivers can expect the "bulk" of the request
1309 * to be aligned, and that unaligned requests do not cross cluster
1313 /* Make a small request up to the first aligned sector. For
1314 * convenience, limit this request to max_transfer even if
1315 * we don't need to fall back to writes. */
1316 num
= MIN(MIN(bytes
, max_transfer
), alignment
- head
);
1317 head
= (head
+ num
) % alignment
;
1318 assert(num
< max_write_zeroes
);
1319 } else if (tail
&& num
> alignment
) {
1320 /* Shorten the request to the last aligned sector. */
1324 /* limit request size */
1325 if (num
> max_write_zeroes
) {
1326 num
= max_write_zeroes
;
1330 /* First try the efficient write zeroes operation */
1331 if (drv
->bdrv_co_pwrite_zeroes
) {
1332 ret
= drv
->bdrv_co_pwrite_zeroes(bs
, offset
, num
,
1333 flags
& bs
->supported_zero_flags
);
1334 if (ret
!= -ENOTSUP
&& (flags
& BDRV_REQ_FUA
) &&
1335 !(bs
->supported_zero_flags
& BDRV_REQ_FUA
)) {
1339 assert(!bs
->supported_zero_flags
);
1342 if (ret
== -ENOTSUP
) {
1343 /* Fall back to bounce buffer if write zeroes is unsupported */
1344 BdrvRequestFlags write_flags
= flags
& ~BDRV_REQ_ZERO_WRITE
;
1346 if ((flags
& BDRV_REQ_FUA
) &&
1347 !(bs
->supported_write_flags
& BDRV_REQ_FUA
)) {
1348 /* No need for bdrv_driver_pwrite() to do a fallback
1349 * flush on each chunk; use just one at the end */
1350 write_flags
&= ~BDRV_REQ_FUA
;
1353 num
= MIN(num
, max_transfer
);
1355 if (iov
.iov_base
== NULL
) {
1356 iov
.iov_base
= qemu_try_blockalign(bs
, num
);
1357 if (iov
.iov_base
== NULL
) {
1361 memset(iov
.iov_base
, 0, num
);
1363 qemu_iovec_init_external(&qiov
, &iov
, 1);
1365 ret
= bdrv_driver_pwritev(bs
, offset
, num
, &qiov
, write_flags
);
1367 /* Keep bounce buffer around if it is big enough for all
1368 * all future requests.
1370 if (num
< max_transfer
) {
1371 qemu_vfree(iov
.iov_base
);
1372 iov
.iov_base
= NULL
;
1381 if (ret
== 0 && need_flush
) {
1382 ret
= bdrv_co_flush(bs
);
1384 qemu_vfree(iov
.iov_base
);
1389 * Forwards an already correctly aligned write request to the BlockDriver,
1390 * after possibly fragmenting it.
1392 static int coroutine_fn
bdrv_aligned_pwritev(BdrvChild
*child
,
1393 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
1394 int64_t align
, QEMUIOVector
*qiov
, int flags
)
1396 BlockDriverState
*bs
= child
->bs
;
1397 BlockDriver
*drv
= bs
->drv
;
1401 int64_t end_sector
= DIV_ROUND_UP(offset
+ bytes
, BDRV_SECTOR_SIZE
);
1402 uint64_t bytes_remaining
= bytes
;
1405 if (bdrv_has_readonly_bitmaps(bs
)) {
1409 assert(is_power_of_2(align
));
1410 assert((offset
& (align
- 1)) == 0);
1411 assert((bytes
& (align
- 1)) == 0);
1412 assert(!qiov
|| bytes
== qiov
->size
);
1413 assert((bs
->open_flags
& BDRV_O_NO_IO
) == 0);
1414 assert(!(flags
& ~BDRV_REQ_MASK
));
1415 max_transfer
= QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs
->bl
.max_transfer
, INT_MAX
),
1418 waited
= wait_serialising_requests(req
);
1419 assert(!waited
|| !req
->serialising
);
1420 assert(req
->overlap_offset
<= offset
);
1421 assert(offset
+ bytes
<= req
->overlap_offset
+ req
->overlap_bytes
);
1422 assert(child
->perm
& BLK_PERM_WRITE
);
1423 assert(end_sector
<= bs
->total_sectors
|| child
->perm
& BLK_PERM_RESIZE
);
1425 ret
= notifier_with_return_list_notify(&bs
->before_write_notifiers
, req
);
1427 if (!ret
&& bs
->detect_zeroes
!= BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF
&&
1428 !(flags
& BDRV_REQ_ZERO_WRITE
) && drv
->bdrv_co_pwrite_zeroes
&&
1429 qemu_iovec_is_zero(qiov
)) {
1430 flags
|= BDRV_REQ_ZERO_WRITE
;
1431 if (bs
->detect_zeroes
== BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP
) {
1432 flags
|= BDRV_REQ_MAY_UNMAP
;
1437 /* Do nothing, write notifier decided to fail this request */
1438 } else if (flags
& BDRV_REQ_ZERO_WRITE
) {
1439 bdrv_debug_event(bs
, BLKDBG_PWRITEV_ZERO
);
1440 ret
= bdrv_co_do_pwrite_zeroes(bs
, offset
, bytes
, flags
);
1441 } else if (flags
& BDRV_REQ_WRITE_COMPRESSED
) {
1442 ret
= bdrv_driver_pwritev_compressed(bs
, offset
, bytes
, qiov
);
1443 } else if (bytes
<= max_transfer
) {
1444 bdrv_debug_event(bs
, BLKDBG_PWRITEV
);
1445 ret
= bdrv_driver_pwritev(bs
, offset
, bytes
, qiov
, flags
);
1447 bdrv_debug_event(bs
, BLKDBG_PWRITEV
);
1448 while (bytes_remaining
) {
1449 int num
= MIN(bytes_remaining
, max_transfer
);
1450 QEMUIOVector local_qiov
;
1451 int local_flags
= flags
;
1454 if (num
< bytes_remaining
&& (flags
& BDRV_REQ_FUA
) &&
1455 !(bs
->supported_write_flags
& BDRV_REQ_FUA
)) {
1456 /* If FUA is going to be emulated by flush, we only
1457 * need to flush on the last iteration */
1458 local_flags
&= ~BDRV_REQ_FUA
;
1460 qemu_iovec_init(&local_qiov
, qiov
->niov
);
1461 qemu_iovec_concat(&local_qiov
, qiov
, bytes
- bytes_remaining
, num
);
1463 ret
= bdrv_driver_pwritev(bs
, offset
+ bytes
- bytes_remaining
,
1464 num
, &local_qiov
, local_flags
);
1465 qemu_iovec_destroy(&local_qiov
);
1469 bytes_remaining
-= num
;
1472 bdrv_debug_event(bs
, BLKDBG_PWRITEV_DONE
);
1474 atomic_inc(&bs
->write_gen
);
1475 bdrv_set_dirty(bs
, offset
, bytes
);
1477 stat64_max(&bs
->wr_highest_offset
, offset
+ bytes
);
1480 bs
->total_sectors
= MAX(bs
->total_sectors
, end_sector
);
1487 static int coroutine_fn
bdrv_co_do_zero_pwritev(BdrvChild
*child
,
1490 BdrvRequestFlags flags
,
1491 BdrvTrackedRequest
*req
)
1493 BlockDriverState
*bs
= child
->bs
;
1494 uint8_t *buf
= NULL
;
1495 QEMUIOVector local_qiov
;
1497 uint64_t align
= bs
->bl
.request_alignment
;
1498 unsigned int head_padding_bytes
, tail_padding_bytes
;
1501 head_padding_bytes
= offset
& (align
- 1);
1502 tail_padding_bytes
= (align
- (offset
+ bytes
)) & (align
- 1);
1505 assert(flags
& BDRV_REQ_ZERO_WRITE
);
1506 if (head_padding_bytes
|| tail_padding_bytes
) {
1507 buf
= qemu_blockalign(bs
, align
);
1508 iov
= (struct iovec
) {
1512 qemu_iovec_init_external(&local_qiov
, &iov
, 1);
1514 if (head_padding_bytes
) {
1515 uint64_t zero_bytes
= MIN(bytes
, align
- head_padding_bytes
);
1517 /* RMW the unaligned part before head. */
1518 mark_request_serialising(req
, align
);
1519 wait_serialising_requests(req
);
1520 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_HEAD
);
1521 ret
= bdrv_aligned_preadv(child
, req
, offset
& ~(align
- 1), align
,
1522 align
, &local_qiov
, 0);
1526 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_HEAD
);
1528 memset(buf
+ head_padding_bytes
, 0, zero_bytes
);
1529 ret
= bdrv_aligned_pwritev(child
, req
, offset
& ~(align
- 1), align
,
1531 flags
& ~BDRV_REQ_ZERO_WRITE
);
1535 offset
+= zero_bytes
;
1536 bytes
-= zero_bytes
;
1539 assert(!bytes
|| (offset
& (align
- 1)) == 0);
1540 if (bytes
>= align
) {
1541 /* Write the aligned part in the middle. */
1542 uint64_t aligned_bytes
= bytes
& ~(align
- 1);
1543 ret
= bdrv_aligned_pwritev(child
, req
, offset
, aligned_bytes
, align
,
1548 bytes
-= aligned_bytes
;
1549 offset
+= aligned_bytes
;
1552 assert(!bytes
|| (offset
& (align
- 1)) == 0);
1554 assert(align
== tail_padding_bytes
+ bytes
);
1555 /* RMW the unaligned part after tail. */
1556 mark_request_serialising(req
, align
);
1557 wait_serialising_requests(req
);
1558 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1559 ret
= bdrv_aligned_preadv(child
, req
, offset
, align
,
1560 align
, &local_qiov
, 0);
1564 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1566 memset(buf
, 0, bytes
);
1567 ret
= bdrv_aligned_pwritev(child
, req
, offset
, align
, align
,
1568 &local_qiov
, flags
& ~BDRV_REQ_ZERO_WRITE
);
1577 * Handle a write request in coroutine context
1579 int coroutine_fn
bdrv_co_pwritev(BdrvChild
*child
,
1580 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
1581 BdrvRequestFlags flags
)
1583 BlockDriverState
*bs
= child
->bs
;
1584 BdrvTrackedRequest req
;
1585 uint64_t align
= bs
->bl
.request_alignment
;
1586 uint8_t *head_buf
= NULL
;
1587 uint8_t *tail_buf
= NULL
;
1588 QEMUIOVector local_qiov
;
1589 bool use_local_qiov
= false;
1592 trace_bdrv_co_pwritev(child
->bs
, offset
, bytes
, flags
);
1597 if (bs
->read_only
) {
1600 assert(!(bs
->open_flags
& BDRV_O_INACTIVE
));
1602 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
1607 bdrv_inc_in_flight(bs
);
1609 * Align write if necessary by performing a read-modify-write cycle.
1610 * Pad qiov with the read parts and be sure to have a tracked request not
1611 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1613 tracked_request_begin(&req
, bs
, offset
, bytes
, BDRV_TRACKED_WRITE
);
1616 ret
= bdrv_co_do_zero_pwritev(child
, offset
, bytes
, flags
, &req
);
1620 if (offset
& (align
- 1)) {
1621 QEMUIOVector head_qiov
;
1622 struct iovec head_iov
;
1624 mark_request_serialising(&req
, align
);
1625 wait_serialising_requests(&req
);
1627 head_buf
= qemu_blockalign(bs
, align
);
1628 head_iov
= (struct iovec
) {
1629 .iov_base
= head_buf
,
1632 qemu_iovec_init_external(&head_qiov
, &head_iov
, 1);
1634 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_HEAD
);
1635 ret
= bdrv_aligned_preadv(child
, &req
, offset
& ~(align
- 1), align
,
1636 align
, &head_qiov
, 0);
1640 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_HEAD
);
1642 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 2);
1643 qemu_iovec_add(&local_qiov
, head_buf
, offset
& (align
- 1));
1644 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1645 use_local_qiov
= true;
1647 bytes
+= offset
& (align
- 1);
1648 offset
= offset
& ~(align
- 1);
1650 /* We have read the tail already if the request is smaller
1651 * than one aligned block.
1653 if (bytes
< align
) {
1654 qemu_iovec_add(&local_qiov
, head_buf
+ bytes
, align
- bytes
);
1659 if ((offset
+ bytes
) & (align
- 1)) {
1660 QEMUIOVector tail_qiov
;
1661 struct iovec tail_iov
;
1665 mark_request_serialising(&req
, align
);
1666 waited
= wait_serialising_requests(&req
);
1667 assert(!waited
|| !use_local_qiov
);
1669 tail_buf
= qemu_blockalign(bs
, align
);
1670 tail_iov
= (struct iovec
) {
1671 .iov_base
= tail_buf
,
1674 qemu_iovec_init_external(&tail_qiov
, &tail_iov
, 1);
1676 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1677 ret
= bdrv_aligned_preadv(child
, &req
, (offset
+ bytes
) & ~(align
- 1),
1678 align
, align
, &tail_qiov
, 0);
1682 bdrv_debug_event(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1684 if (!use_local_qiov
) {
1685 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 1);
1686 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1687 use_local_qiov
= true;
1690 tail_bytes
= (offset
+ bytes
) & (align
- 1);
1691 qemu_iovec_add(&local_qiov
, tail_buf
+ tail_bytes
, align
- tail_bytes
);
1693 bytes
= ROUND_UP(bytes
, align
);
1696 ret
= bdrv_aligned_pwritev(child
, &req
, offset
, bytes
, align
,
1697 use_local_qiov
? &local_qiov
: qiov
,
1702 if (use_local_qiov
) {
1703 qemu_iovec_destroy(&local_qiov
);
1705 qemu_vfree(head_buf
);
1706 qemu_vfree(tail_buf
);
1708 tracked_request_end(&req
);
1709 bdrv_dec_in_flight(bs
);
1713 static int coroutine_fn
bdrv_co_do_writev(BdrvChild
*child
,
1714 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1715 BdrvRequestFlags flags
)
1717 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
1721 return bdrv_co_pwritev(child
, sector_num
<< BDRV_SECTOR_BITS
,
1722 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
1725 int coroutine_fn
bdrv_co_writev(BdrvChild
*child
, int64_t sector_num
,
1726 int nb_sectors
, QEMUIOVector
*qiov
)
1728 return bdrv_co_do_writev(child
, sector_num
, nb_sectors
, qiov
, 0);
1731 int coroutine_fn
bdrv_co_pwrite_zeroes(BdrvChild
*child
, int64_t offset
,
1732 int bytes
, BdrvRequestFlags flags
)
1734 trace_bdrv_co_pwrite_zeroes(child
->bs
, offset
, bytes
, flags
);
1736 if (!(child
->bs
->open_flags
& BDRV_O_UNMAP
)) {
1737 flags
&= ~BDRV_REQ_MAY_UNMAP
;
1740 return bdrv_co_pwritev(child
, offset
, bytes
, NULL
,
1741 BDRV_REQ_ZERO_WRITE
| flags
);
1745 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
1747 int bdrv_flush_all(void)
1749 BdrvNextIterator it
;
1750 BlockDriverState
*bs
= NULL
;
1753 for (bs
= bdrv_first(&it
); bs
; bs
= bdrv_next(&it
)) {
1754 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
1757 aio_context_acquire(aio_context
);
1758 ret
= bdrv_flush(bs
);
1759 if (ret
< 0 && !result
) {
1762 aio_context_release(aio_context
);
1769 typedef struct BdrvCoGetBlockStatusData
{
1770 BlockDriverState
*bs
;
1771 BlockDriverState
*base
;
1776 BlockDriverState
**file
;
1779 } BdrvCoGetBlockStatusData
;
1781 int64_t coroutine_fn
bdrv_co_get_block_status_from_file(BlockDriverState
*bs
,
1785 BlockDriverState
**file
)
1787 assert(bs
->file
&& bs
->file
->bs
);
1789 *file
= bs
->file
->bs
;
1790 return BDRV_BLOCK_RAW
| BDRV_BLOCK_OFFSET_VALID
|
1791 (sector_num
<< BDRV_SECTOR_BITS
);
1794 int64_t coroutine_fn
bdrv_co_get_block_status_from_backing(BlockDriverState
*bs
,
1798 BlockDriverState
**file
)
1800 assert(bs
->backing
&& bs
->backing
->bs
);
1802 *file
= bs
->backing
->bs
;
1803 return BDRV_BLOCK_RAW
| BDRV_BLOCK_OFFSET_VALID
|
1804 (sector_num
<< BDRV_SECTOR_BITS
);
1808 * Returns the allocation status of the specified sectors.
1809 * Drivers not implementing the functionality are assumed to not support
1810 * backing files, hence all their sectors are reported as allocated.
1812 * If 'want_zero' is true, the caller is querying for mapping purposes,
1813 * and the result should include BDRV_BLOCK_OFFSET_VALID and
1814 * BDRV_BLOCK_ZERO where possible; otherwise, the result may omit those
1815 * bits particularly if it allows for a larger value in 'pnum'.
1817 * If 'sector_num' is beyond the end of the disk image the return value is
1818 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
1820 * 'pnum' is set to the number of sectors (including and immediately following
1821 * the specified sector) that are known to be in the same
1822 * allocated/unallocated state.
1824 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
1825 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
1826 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
1828 * If returned value is positive, BDRV_BLOCK_OFFSET_VALID bit is set, and
1829 * 'file' is non-NULL, then '*file' points to the BDS which the sector range
1832 static int64_t coroutine_fn
bdrv_co_get_block_status(BlockDriverState
*bs
,
1835 int nb_sectors
, int *pnum
,
1836 BlockDriverState
**file
)
1838 int64_t total_sectors
;
1841 BlockDriverState
*local_file
= NULL
;
1845 total_sectors
= bdrv_nb_sectors(bs
);
1846 if (total_sectors
< 0) {
1847 ret
= total_sectors
;
1851 if (sector_num
>= total_sectors
) {
1852 ret
= BDRV_BLOCK_EOF
;
1860 n
= total_sectors
- sector_num
;
1861 if (n
< nb_sectors
) {
1865 if (!bs
->drv
->bdrv_co_get_block_status
) {
1867 ret
= BDRV_BLOCK_DATA
| BDRV_BLOCK_ALLOCATED
;
1868 if (sector_num
+ nb_sectors
== total_sectors
) {
1869 ret
|= BDRV_BLOCK_EOF
;
1871 if (bs
->drv
->protocol_name
) {
1872 ret
|= BDRV_BLOCK_OFFSET_VALID
| (sector_num
* BDRV_SECTOR_SIZE
);
1878 bdrv_inc_in_flight(bs
);
1879 ret
= bs
->drv
->bdrv_co_get_block_status(bs
, sector_num
, nb_sectors
, pnum
,
1886 if (ret
& BDRV_BLOCK_RAW
) {
1887 assert(ret
& BDRV_BLOCK_OFFSET_VALID
&& local_file
);
1888 ret
= bdrv_co_get_block_status(local_file
, want_zero
,
1889 ret
>> BDRV_SECTOR_BITS
,
1890 *pnum
, pnum
, &local_file
);
1894 if (ret
& (BDRV_BLOCK_DATA
| BDRV_BLOCK_ZERO
)) {
1895 ret
|= BDRV_BLOCK_ALLOCATED
;
1896 } else if (want_zero
) {
1897 if (bdrv_unallocated_blocks_are_zero(bs
)) {
1898 ret
|= BDRV_BLOCK_ZERO
;
1899 } else if (bs
->backing
) {
1900 BlockDriverState
*bs2
= bs
->backing
->bs
;
1901 int64_t nb_sectors2
= bdrv_nb_sectors(bs2
);
1903 if (nb_sectors2
>= 0 && sector_num
>= nb_sectors2
) {
1904 ret
|= BDRV_BLOCK_ZERO
;
1909 if (want_zero
&& local_file
&& local_file
!= bs
&&
1910 (ret
& BDRV_BLOCK_DATA
) && !(ret
& BDRV_BLOCK_ZERO
) &&
1911 (ret
& BDRV_BLOCK_OFFSET_VALID
)) {
1914 ret2
= bdrv_co_get_block_status(local_file
, want_zero
,
1915 ret
>> BDRV_SECTOR_BITS
,
1916 *pnum
, &file_pnum
, NULL
);
1918 /* Ignore errors. This is just providing extra information, it
1919 * is useful but not necessary.
1921 if (ret2
& BDRV_BLOCK_EOF
&&
1922 (!file_pnum
|| ret2
& BDRV_BLOCK_ZERO
)) {
1924 * It is valid for the format block driver to read
1925 * beyond the end of the underlying file's current
1926 * size; such areas read as zero.
1928 ret
|= BDRV_BLOCK_ZERO
;
1930 /* Limit request to the range reported by the protocol driver */
1932 ret
|= (ret2
& BDRV_BLOCK_ZERO
);
1938 bdrv_dec_in_flight(bs
);
1939 if (ret
>= 0 && sector_num
+ *pnum
== total_sectors
) {
1940 ret
|= BDRV_BLOCK_EOF
;
1949 static int64_t coroutine_fn
bdrv_co_get_block_status_above(BlockDriverState
*bs
,
1950 BlockDriverState
*base
,
1955 BlockDriverState
**file
)
1957 BlockDriverState
*p
;
1962 for (p
= bs
; p
!= base
; p
= backing_bs(p
)) {
1963 ret
= bdrv_co_get_block_status(p
, want_zero
, sector_num
, nb_sectors
,
1968 if (ret
& BDRV_BLOCK_ZERO
&& ret
& BDRV_BLOCK_EOF
&& !first
) {
1970 * Reading beyond the end of the file continues to read
1971 * zeroes, but we can only widen the result to the
1972 * unallocated length we learned from an earlier
1977 if (ret
& (BDRV_BLOCK_ZERO
| BDRV_BLOCK_DATA
)) {
1980 /* [sector_num, pnum] unallocated on this layer, which could be only
1981 * the first part of [sector_num, nb_sectors]. */
1982 nb_sectors
= MIN(nb_sectors
, *pnum
);
1988 /* Coroutine wrapper for bdrv_get_block_status_above() */
1989 static void coroutine_fn
bdrv_get_block_status_above_co_entry(void *opaque
)
1991 BdrvCoGetBlockStatusData
*data
= opaque
;
1993 data
->ret
= bdrv_co_get_block_status_above(data
->bs
, data
->base
,
2003 * Synchronous wrapper around bdrv_co_get_block_status_above().
2005 * See bdrv_co_get_block_status_above() for details.
2007 static int64_t bdrv_common_block_status_above(BlockDriverState
*bs
,
2008 BlockDriverState
*base
,
2011 int nb_sectors
, int *pnum
,
2012 BlockDriverState
**file
)
2015 BdrvCoGetBlockStatusData data
= {
2018 .want_zero
= want_zero
,
2019 .sector_num
= sector_num
,
2020 .nb_sectors
= nb_sectors
,
2026 if (qemu_in_coroutine()) {
2027 /* Fast-path if already in coroutine context */
2028 bdrv_get_block_status_above_co_entry(&data
);
2030 co
= qemu_coroutine_create(bdrv_get_block_status_above_co_entry
,
2032 bdrv_coroutine_enter(bs
, co
);
2033 BDRV_POLL_WHILE(bs
, !data
.done
);
2038 int64_t bdrv_get_block_status_above(BlockDriverState
*bs
,
2039 BlockDriverState
*base
,
2041 int nb_sectors
, int *pnum
,
2042 BlockDriverState
**file
)
2044 return bdrv_common_block_status_above(bs
, base
, true, sector_num
,
2045 nb_sectors
, pnum
, file
);
2048 int64_t bdrv_get_block_status(BlockDriverState
*bs
,
2050 int nb_sectors
, int *pnum
,
2051 BlockDriverState
**file
)
2053 return bdrv_get_block_status_above(bs
, backing_bs(bs
),
2054 sector_num
, nb_sectors
, pnum
, file
);
2057 int coroutine_fn
bdrv_is_allocated(BlockDriverState
*bs
, int64_t offset
,
2058 int64_t bytes
, int64_t *pnum
)
2063 assert(QEMU_IS_ALIGNED(offset
, BDRV_SECTOR_SIZE
));
2064 assert(QEMU_IS_ALIGNED(bytes
, BDRV_SECTOR_SIZE
) && bytes
< INT_MAX
);
2065 ret
= bdrv_common_block_status_above(bs
, backing_bs(bs
), false,
2066 offset
>> BDRV_SECTOR_BITS
,
2067 bytes
>> BDRV_SECTOR_BITS
, &psectors
,
2073 *pnum
= psectors
* BDRV_SECTOR_SIZE
;
2075 return !!(ret
& BDRV_BLOCK_ALLOCATED
);
2079 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2081 * Return true if (a prefix of) the given range is allocated in any image
2082 * between BASE and TOP (inclusive). BASE can be NULL to check if the given
2083 * offset is allocated in any image of the chain. Return false otherwise,
2084 * or negative errno on failure.
2086 * 'pnum' is set to the number of bytes (including and immediately
2087 * following the specified offset) that are known to be in the same
2088 * allocated/unallocated state. Note that a subsequent call starting
2089 * at 'offset + *pnum' may return the same allocation status (in other
2090 * words, the result is not necessarily the maximum possible range);
2091 * but 'pnum' will only be 0 when end of file is reached.
2094 int bdrv_is_allocated_above(BlockDriverState
*top
,
2095 BlockDriverState
*base
,
2096 int64_t offset
, int64_t bytes
, int64_t *pnum
)
2098 BlockDriverState
*intermediate
;
2103 while (intermediate
&& intermediate
!= base
) {
2107 ret
= bdrv_is_allocated(intermediate
, offset
, bytes
, &pnum_inter
);
2116 size_inter
= bdrv_getlength(intermediate
);
2117 if (size_inter
< 0) {
2120 if (n
> pnum_inter
&&
2121 (intermediate
== top
|| offset
+ pnum_inter
< size_inter
)) {
2125 intermediate
= backing_bs(intermediate
);
2132 typedef struct BdrvVmstateCo
{
2133 BlockDriverState
*bs
;
2140 static int coroutine_fn
2141 bdrv_co_rw_vmstate(BlockDriverState
*bs
, QEMUIOVector
*qiov
, int64_t pos
,
2144 BlockDriver
*drv
= bs
->drv
;
2147 bdrv_inc_in_flight(bs
);
2151 } else if (drv
->bdrv_load_vmstate
) {
2153 ret
= drv
->bdrv_load_vmstate(bs
, qiov
, pos
);
2155 ret
= drv
->bdrv_save_vmstate(bs
, qiov
, pos
);
2157 } else if (bs
->file
) {
2158 ret
= bdrv_co_rw_vmstate(bs
->file
->bs
, qiov
, pos
, is_read
);
2161 bdrv_dec_in_flight(bs
);
2165 static void coroutine_fn
bdrv_co_rw_vmstate_entry(void *opaque
)
2167 BdrvVmstateCo
*co
= opaque
;
2168 co
->ret
= bdrv_co_rw_vmstate(co
->bs
, co
->qiov
, co
->pos
, co
->is_read
);
2172 bdrv_rw_vmstate(BlockDriverState
*bs
, QEMUIOVector
*qiov
, int64_t pos
,
2175 if (qemu_in_coroutine()) {
2176 return bdrv_co_rw_vmstate(bs
, qiov
, pos
, is_read
);
2178 BdrvVmstateCo data
= {
2183 .ret
= -EINPROGRESS
,
2185 Coroutine
*co
= qemu_coroutine_create(bdrv_co_rw_vmstate_entry
, &data
);
2187 bdrv_coroutine_enter(bs
, co
);
2188 BDRV_POLL_WHILE(bs
, data
.ret
== -EINPROGRESS
);
2193 int bdrv_save_vmstate(BlockDriverState
*bs
, const uint8_t *buf
,
2194 int64_t pos
, int size
)
2197 struct iovec iov
= {
2198 .iov_base
= (void *) buf
,
2203 qemu_iovec_init_external(&qiov
, &iov
, 1);
2205 ret
= bdrv_writev_vmstate(bs
, &qiov
, pos
);
2213 int bdrv_writev_vmstate(BlockDriverState
*bs
, QEMUIOVector
*qiov
, int64_t pos
)
2215 return bdrv_rw_vmstate(bs
, qiov
, pos
, false);
2218 int bdrv_load_vmstate(BlockDriverState
*bs
, uint8_t *buf
,
2219 int64_t pos
, int size
)
2222 struct iovec iov
= {
2228 qemu_iovec_init_external(&qiov
, &iov
, 1);
2229 ret
= bdrv_readv_vmstate(bs
, &qiov
, pos
);
2237 int bdrv_readv_vmstate(BlockDriverState
*bs
, QEMUIOVector
*qiov
, int64_t pos
)
2239 return bdrv_rw_vmstate(bs
, qiov
, pos
, true);
2242 /**************************************************************/
2245 void bdrv_aio_cancel(BlockAIOCB
*acb
)
2248 bdrv_aio_cancel_async(acb
);
2249 while (acb
->refcnt
> 1) {
2250 if (acb
->aiocb_info
->get_aio_context
) {
2251 aio_poll(acb
->aiocb_info
->get_aio_context(acb
), true);
2252 } else if (acb
->bs
) {
2253 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2254 * assert that we're not using an I/O thread. Thread-safe
2255 * code should use bdrv_aio_cancel_async exclusively.
2257 assert(bdrv_get_aio_context(acb
->bs
) == qemu_get_aio_context());
2258 aio_poll(bdrv_get_aio_context(acb
->bs
), true);
2263 qemu_aio_unref(acb
);
2266 /* Async version of aio cancel. The caller is not blocked if the acb implements
2267 * cancel_async, otherwise we do nothing and let the request normally complete.
2268 * In either case the completion callback must be called. */
2269 void bdrv_aio_cancel_async(BlockAIOCB
*acb
)
2271 if (acb
->aiocb_info
->cancel_async
) {
2272 acb
->aiocb_info
->cancel_async(acb
);
2276 /**************************************************************/
2277 /* Coroutine block device emulation */
2279 typedef struct FlushCo
{
2280 BlockDriverState
*bs
;
2285 static void coroutine_fn
bdrv_flush_co_entry(void *opaque
)
2287 FlushCo
*rwco
= opaque
;
2289 rwco
->ret
= bdrv_co_flush(rwco
->bs
);
2292 int coroutine_fn
bdrv_co_flush(BlockDriverState
*bs
)
2297 bdrv_inc_in_flight(bs
);
2299 if (!bdrv_is_inserted(bs
) || bdrv_is_read_only(bs
) ||
2304 qemu_co_mutex_lock(&bs
->reqs_lock
);
2305 current_gen
= atomic_read(&bs
->write_gen
);
2307 /* Wait until any previous flushes are completed */
2308 while (bs
->active_flush_req
) {
2309 qemu_co_queue_wait(&bs
->flush_queue
, &bs
->reqs_lock
);
2312 /* Flushes reach this point in nondecreasing current_gen order. */
2313 bs
->active_flush_req
= true;
2314 qemu_co_mutex_unlock(&bs
->reqs_lock
);
2316 /* Write back all layers by calling one driver function */
2317 if (bs
->drv
->bdrv_co_flush
) {
2318 ret
= bs
->drv
->bdrv_co_flush(bs
);
2322 /* Write back cached data to the OS even with cache=unsafe */
2323 BLKDBG_EVENT(bs
->file
, BLKDBG_FLUSH_TO_OS
);
2324 if (bs
->drv
->bdrv_co_flush_to_os
) {
2325 ret
= bs
->drv
->bdrv_co_flush_to_os(bs
);
2331 /* But don't actually force it to the disk with cache=unsafe */
2332 if (bs
->open_flags
& BDRV_O_NO_FLUSH
) {
2336 /* Check if we really need to flush anything */
2337 if (bs
->flushed_gen
== current_gen
) {
2341 BLKDBG_EVENT(bs
->file
, BLKDBG_FLUSH_TO_DISK
);
2342 if (bs
->drv
->bdrv_co_flush_to_disk
) {
2343 ret
= bs
->drv
->bdrv_co_flush_to_disk(bs
);
2344 } else if (bs
->drv
->bdrv_aio_flush
) {
2346 CoroutineIOCompletion co
= {
2347 .coroutine
= qemu_coroutine_self(),
2350 acb
= bs
->drv
->bdrv_aio_flush(bs
, bdrv_co_io_em_complete
, &co
);
2354 qemu_coroutine_yield();
2359 * Some block drivers always operate in either writethrough or unsafe
2360 * mode and don't support bdrv_flush therefore. Usually qemu doesn't
2361 * know how the server works (because the behaviour is hardcoded or
2362 * depends on server-side configuration), so we can't ensure that
2363 * everything is safe on disk. Returning an error doesn't work because
2364 * that would break guests even if the server operates in writethrough
2367 * Let's hope the user knows what he's doing.
2376 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2377 * in the case of cache=unsafe, so there are no useless flushes.
2380 ret
= bs
->file
? bdrv_co_flush(bs
->file
->bs
) : 0;
2382 /* Notify any pending flushes that we have completed */
2384 bs
->flushed_gen
= current_gen
;
2387 qemu_co_mutex_lock(&bs
->reqs_lock
);
2388 bs
->active_flush_req
= false;
2389 /* Return value is ignored - it's ok if wait queue is empty */
2390 qemu_co_queue_next(&bs
->flush_queue
);
2391 qemu_co_mutex_unlock(&bs
->reqs_lock
);
2394 bdrv_dec_in_flight(bs
);
2398 int bdrv_flush(BlockDriverState
*bs
)
2401 FlushCo flush_co
= {
2406 if (qemu_in_coroutine()) {
2407 /* Fast-path if already in coroutine context */
2408 bdrv_flush_co_entry(&flush_co
);
2410 co
= qemu_coroutine_create(bdrv_flush_co_entry
, &flush_co
);
2411 bdrv_coroutine_enter(bs
, co
);
2412 BDRV_POLL_WHILE(bs
, flush_co
.ret
== NOT_DONE
);
2415 return flush_co
.ret
;
2418 typedef struct DiscardCo
{
2419 BlockDriverState
*bs
;
2424 static void coroutine_fn
bdrv_pdiscard_co_entry(void *opaque
)
2426 DiscardCo
*rwco
= opaque
;
2428 rwco
->ret
= bdrv_co_pdiscard(rwco
->bs
, rwco
->offset
, rwco
->bytes
);
2431 int coroutine_fn
bdrv_co_pdiscard(BlockDriverState
*bs
, int64_t offset
,
2434 BdrvTrackedRequest req
;
2435 int max_pdiscard
, ret
;
2436 int head
, tail
, align
;
2442 if (bdrv_has_readonly_bitmaps(bs
)) {
2446 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
2449 } else if (bs
->read_only
) {
2452 assert(!(bs
->open_flags
& BDRV_O_INACTIVE
));
2454 /* Do nothing if disabled. */
2455 if (!(bs
->open_flags
& BDRV_O_UNMAP
)) {
2459 if (!bs
->drv
->bdrv_co_pdiscard
&& !bs
->drv
->bdrv_aio_pdiscard
) {
2463 /* Discard is advisory, but some devices track and coalesce
2464 * unaligned requests, so we must pass everything down rather than
2465 * round here. Still, most devices will just silently ignore
2466 * unaligned requests (by returning -ENOTSUP), so we must fragment
2467 * the request accordingly. */
2468 align
= MAX(bs
->bl
.pdiscard_alignment
, bs
->bl
.request_alignment
);
2469 assert(align
% bs
->bl
.request_alignment
== 0);
2470 head
= offset
% align
;
2471 tail
= (offset
+ bytes
) % align
;
2473 bdrv_inc_in_flight(bs
);
2474 tracked_request_begin(&req
, bs
, offset
, bytes
, BDRV_TRACKED_DISCARD
);
2476 ret
= notifier_with_return_list_notify(&bs
->before_write_notifiers
, &req
);
2481 max_pdiscard
= QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs
->bl
.max_pdiscard
, INT_MAX
),
2483 assert(max_pdiscard
>= bs
->bl
.request_alignment
);
2489 /* Make small requests to get to alignment boundaries. */
2490 num
= MIN(bytes
, align
- head
);
2491 if (!QEMU_IS_ALIGNED(num
, bs
->bl
.request_alignment
)) {
2492 num
%= bs
->bl
.request_alignment
;
2494 head
= (head
+ num
) % align
;
2495 assert(num
< max_pdiscard
);
2498 /* Shorten the request to the last aligned cluster. */
2500 } else if (!QEMU_IS_ALIGNED(tail
, bs
->bl
.request_alignment
) &&
2501 tail
> bs
->bl
.request_alignment
) {
2502 tail
%= bs
->bl
.request_alignment
;
2506 /* limit request size */
2507 if (num
> max_pdiscard
) {
2511 if (bs
->drv
->bdrv_co_pdiscard
) {
2512 ret
= bs
->drv
->bdrv_co_pdiscard(bs
, offset
, num
);
2515 CoroutineIOCompletion co
= {
2516 .coroutine
= qemu_coroutine_self(),
2519 acb
= bs
->drv
->bdrv_aio_pdiscard(bs
, offset
, num
,
2520 bdrv_co_io_em_complete
, &co
);
2525 qemu_coroutine_yield();
2529 if (ret
&& ret
!= -ENOTSUP
) {
2538 atomic_inc(&bs
->write_gen
);
2539 bdrv_set_dirty(bs
, req
.offset
, req
.bytes
);
2540 tracked_request_end(&req
);
2541 bdrv_dec_in_flight(bs
);
2545 int bdrv_pdiscard(BlockDriverState
*bs
, int64_t offset
, int bytes
)
2555 if (qemu_in_coroutine()) {
2556 /* Fast-path if already in coroutine context */
2557 bdrv_pdiscard_co_entry(&rwco
);
2559 co
= qemu_coroutine_create(bdrv_pdiscard_co_entry
, &rwco
);
2560 bdrv_coroutine_enter(bs
, co
);
2561 BDRV_POLL_WHILE(bs
, rwco
.ret
== NOT_DONE
);
2567 int bdrv_co_ioctl(BlockDriverState
*bs
, int req
, void *buf
)
2569 BlockDriver
*drv
= bs
->drv
;
2570 CoroutineIOCompletion co
= {
2571 .coroutine
= qemu_coroutine_self(),
2575 bdrv_inc_in_flight(bs
);
2576 if (!drv
|| (!drv
->bdrv_aio_ioctl
&& !drv
->bdrv_co_ioctl
)) {
2581 if (drv
->bdrv_co_ioctl
) {
2582 co
.ret
= drv
->bdrv_co_ioctl(bs
, req
, buf
);
2584 acb
= drv
->bdrv_aio_ioctl(bs
, req
, buf
, bdrv_co_io_em_complete
, &co
);
2589 qemu_coroutine_yield();
2592 bdrv_dec_in_flight(bs
);
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
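
/* Illustrative caller (a sketch, not code from this file): allocate a zeroed,
 * properly aligned bounce buffer and release it with qemu_vfree():
 *
 *     void *buf = qemu_try_blockalign0(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */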
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}