/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

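/* Merging note: when limits from a child are folded into the parent below,
 * transfer sizes take the stricter (smallest non-zero) value while alignment
 * requirements take the larger value.  For example (illustrative numbers
 * only), a child reporting max_transfer = 64 KiB and opt_mem_alignment = 4096
 * tightens a parent that previously had no transfer cap and 512-byte
 * alignment. */
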
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static bool bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;
    bool waited;

    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }

    QLIST_FOREACH(child, &bs->children, next) {
        waited |= bdrv_drain_recurse(child->bs);
    }

    return waited;
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
} BdrvCoDrainData;

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    bdrv_dec_in_flight(bs);
    bdrv_drained_begin(bs);
    data->done = true;
    qemu_coroutine_enter(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
        return;
    }

    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

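/* Illustrative usage sketch (hypothetical caller): code that needs a node to
 * be quiescent brackets its critical section with the drained pair, e.g.
 *
 *     bdrv_drained_begin(bs);
 *     ... reconfigure or inspect bs with no new requests racing in ...
 *     bdrv_drained_end(bs);
 *
 * Sections may nest because quiesce_counter is a counter, not a flag. */
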
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool waited = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (waited) {
        waited = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs);
                }
            }
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);

    job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs             = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

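/* Worked example (illustrative values): with req->offset = 1536,
 * req->bytes = 1024 and align = 4096, overlap_offset becomes 0 and
 * overlap_bytes becomes ROUND_UP(2560, 4096) - 0 = 4096, i.e. the request
 * serialises against the whole 4 KiB block it touches. */
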
/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

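/* Worked example (illustrative values): with a 64 KiB cluster size,
 * offset = 70000 and bytes = 4096 yield cluster_offset = 65536 and
 * cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 4096, 65536) = 65536. */
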
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /* No overlap if one range ends before the other begins */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }

    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

static void dummy_bh_cb(void *opaque)
{
}

void bdrv_wakeup(BlockDriverState *bs)
{
    if (bs->wakeup) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}

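/* Note on the pattern above: when the caller is already a coroutine the
 * request runs inline; otherwise a new coroutine is created and the caller
 * polls its AioContext until rwco.ret leaves the NOT_DONE sentinel.  The same
 * synchronous-wrapper shape recurs below for flush, discard and block
 * status. */
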
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

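/* Illustrative usage sketch (hypothetical caller and buffer): a format driver
 * that must not let later writes overtake a metadata update could do
 *
 *     ret = bdrv_pwrite_sync(bs->file, header_offset, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 * header_offset and header are assumptions used for this example only. */
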
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine);
}

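/* Note: the two driver dispatch helpers below try the interfaces from newest
 * to oldest: a byte-based .bdrv_co_preadv/.bdrv_co_pwritev first, then the
 * sector-based coroutine callbacks, and finally the AIO callbacks, which are
 * bridged back into coroutine context through bdrv_co_io_em_complete(). */
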
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}

/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
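/* That is 32768 sectors of BDRV_SECTOR_SIZE (512) bytes each, so the fallback
 * bounce buffer used when a driver lacks an efficient write-zeroes callback is
 * capped at 16 MiB. */
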
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + count) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (count > 0 && !ret) {
        int num = count;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. */
            num = MIN(count, alignment - head);
            head = 0;
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * all future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        count -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    ++bs->write_gen;
    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
                                int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int count, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, count, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

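/* Illustrative note: callers that do not need the zeroed range to stay
 * preallocated typically pass BDRV_REQ_MAY_UNMAP, e.g.
 *
 *     bdrv_co_pwrite_zeroes(child, offset, count, BDRV_REQ_MAY_UNMAP);
 *
 * the flag is dropped above when the node was not opened with BDRV_O_UNMAP. */
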
/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    bdrv_inc_in_flight(bs);
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        ret = bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                    *pnum, pnum, file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    return ret;
}

static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
                                                           BlockDriverState *base,
                                                           int64_t sector_num,
                                                           int nb_sectors,
                                                           int *pnum,
                                                           BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                   &data);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, !data.done);
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

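/* Note: a return of 1 with *pnum == 16, for instance, means the 16 sectors
 * starting at sector_num are allocated in this layer; a return of 0 means the
 * same range has to be resolved from the backing chain instead.  (Values are
 * illustrative.) */
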
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}

typedef struct BdrvVmstateCo {
    BlockDriverState   *bs;
    QEMUIOVector       *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
                       : drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    return -ENOTSUP;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;

    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
        return data.ret;
    }
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}

/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, true);
}

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t offset;
            int bytes;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BdrvChild *child;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        bdrv_dec_in_flight(acb->common.bs);
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    }
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    /* Matched by bdrv_co_complete's bdrv_dec_in_flight.  */
    bdrv_inc_in_flight(child->bs);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
    acb->child = child;
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    /* Matched by bdrv_co_complete's bdrv_dec_in_flight.  */
    bdrv_inc_in_flight(bs);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    bdrv_inc_in_flight(bs);

    int current_gen = bs->write_gen;

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue);
    }

    bs->active_flush_req = true;

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    bs->flushed_gen = current_gen;
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);

    bdrv_dec_in_flight(bs);
    return ret;
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int count;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
}

int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int count)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, count);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, so ignore any unaligned head or tail */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    if (head) {
        head = MIN(count, align - head);
        count -= head;
        offset += head;
    }

    count = QEMU_ALIGN_DOWN(count, align);
    if (!count) {
        return 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard);

    while (count > 0) {
        int ret;
        int num = MIN(count, max_pdiscard);

        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        count -= num;
    }
    ret = 0;
out:
    ++bs->write_gen;
    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
                   req.bytes >> BDRV_SECTOR_BITS);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .count = count,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}