/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
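
/* The two helpers above only notify parents that implement the optional
 * drained_begin/drained_end callbacks in their BdrvChildRole; parents
 * without them are simply skipped. */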
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
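
/* Limits are therefore built bottom-up: children are refreshed first, their
 * limits merged into bs->bl, and only then may the driver's own
 * bdrv_refresh_limits callback tighten or override the result. */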
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
static bool bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;
    bool waited;

    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }

    QLIST_FOREACH(child, &bs->children, next) {
        waited |= bdrv_drain_recurse(child->bs);
    }

    return waited;
}
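
/* bdrv_drain_recurse() returns true if it had to wait for at least one
 * request; bdrv_drain_all_begin() uses this to keep iterating until a full
 * pass over every BlockDriverState completes without waiting. */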
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
} BdrvCoDrainData;

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    bdrv_dec_in_flight(bs);
    bdrv_drained_begin(bs);
    data->done = true;
    qemu_coroutine_enter(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
        return;
    }

    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    bdrv_io_unplugged_end(bs);
}
void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
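
/* Hypothetical usage sketch (not a call site in this file): callers that
 * need the device quiescent around a graph or configuration change
 * typically bracket the critical section instead of using the one-shot
 * bdrv_drain():
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the BDS graph while no requests are in flight ...
 *     bdrv_drained_end(bs);
 */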
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool waited = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        aio_disable_external(aio_context);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (waited) {
        waited = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs);
                }
            }
            aio_context_release(aio_context);
        }
    }

    g_slist_free(aio_ctxs);
}
void bdrv_drain_all_end(void)
{
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        aio_enable_external(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}
void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/*
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
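
/* The overlap window recorded here only ever grows: overlap_offset moves
 * down and overlap_bytes moves up, so once a request is marked serialising
 * it keeps conflicting with everything it conflicted with before. */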
/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

static void dummy_bh_cb(void *opaque)
{
}

void bdrv_wakeup(BlockDriverState *bs)
{
    if (bs->wakeup) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}
typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}
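
/* This is the usual pattern for running coroutine code from synchronous
 * callers: if we are already in a coroutine the entry function is called
 * directly, otherwise a new coroutine is created and BDRV_POLL_WHILE()
 * drives the AioContext until rwco.ret leaves the NOT_DONE marker. */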
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
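
/* Illustrative caller sketch (hypothetical, not taken from this file):
 * a format driver uses this barrier write when metadata must hit the disk
 * before dependent data, e.g.
 *
 *     ret = bdrv_pwrite_sync(bs->file, header_offset, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */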
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
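
/* Reads past the end of the device are satisfied from the zero-filled tail
 * handled above (qemu_iovec_memset), so callers see zeroes beyond EOF rather
 * than an error. */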
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}
/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    MAX_WRITE_ZEROES_BOUNCE_BUFFER);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + count) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (count > 0 && !ret) {
        int num = count;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(count, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * all future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        count -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}
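
/* The loop above splits a zero-write into an unaligned head, a run of
 * aligned chunks no larger than max_write_zeroes, and an unaligned tail;
 * chunks the driver rejects with -ENOTSUP fall back to writing an explicit
 * zeroed bounce buffer. */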
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    ++bs->write_gen;
    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
                                int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int count, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, count, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
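
/* Hypothetical usage sketch: a caller that wants holes punched where the
 * image allows it passes BDRV_REQ_MAY_UNMAP and lets the code above strip
 * the flag when the BDS was not opened with BDRV_O_UNMAP:
 *
 *     ret = bdrv_co_pwrite_zeroes(child, offset, count, BDRV_REQ_MAY_UNMAP);
 */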
/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    bdrv_inc_in_flight(bs);
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        ret = bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                    *pnum, pnum, file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    return ret;
}
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors, int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}
/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                   &data);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, !data.done);
    }
    return data.ret;
}
int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
typedef struct BdrvVmstateCo {
    BlockDriverState    *bs;
    QEMUIOVector        *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;
static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
                       : drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    return -ENOTSUP;
}
static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}
static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
        return data.ret;
    }
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, true);
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t offset;
            int bytes;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BdrvChild *child;
    BlockRequest req;
    bool is_write;
    bool need_bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        bdrv_dec_in_flight(acb->common.bs);
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    }
}
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    /* Matched by bdrv_co_complete's bdrv_dec_in_flight.  */
    bdrv_inc_in_flight(child->bs);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
    acb->child = child;
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    /* Matched by bdrv_co_complete's bdrv_dec_in_flight.  */
    bdrv_inc_in_flight(bs);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;


static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    bdrv_inc_in_flight(bs);

    int current_gen = bs->write_gen;

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue);
    }

    bs->active_flush_req = true;

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);

    bdrv_dec_in_flight(bs);
    return ret;
}
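
/* Flush ordering above: data is first written back to the OS page cache
 * (bdrv_co_flush_to_os), then forced to stable storage unless
 * BDRV_O_NO_FLUSH or an unchanged write_gen lets us skip it, and finally
 * the request is propagated to bs->file so the protocol layer flushes too. */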
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int count;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
}
int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int count)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, count);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + count) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (count > 0) {
        int ret;
        int num = count;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(count, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        count -= num;
    }
    ret = 0;
out:
    ++bs->write_gen;
    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
                   req.bytes >> BDRV_SECTOR_BITS);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .count = count,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}
void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}
void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}
void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}