/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    qemu_coroutine_enter(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
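
/*
 * Illustrative sketch (not part of the original file): a caller that needs a
 * quiesced section typically brackets its own work with the drained-section
 * pair rather than calling bdrv_drain() directly, e.g.:
 *
 *     bdrv_drained_begin(bs);
 *     ... operate on bs while no new external requests can arrive ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain(bs) above is simply the degenerate case of an empty section.
 */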
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);

    job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
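
/*
 * Worked example (illustrative only, assuming bdrv_get_info() reports a
 * 64 KiB cluster size): a request with offset = 70 KiB and bytes = 4 KiB is
 * widened to cluster_offset = 64 KiB and
 * cluster_bytes = QEMU_ALIGN_UP(6 KiB + 4 KiB, 64 KiB) = 64 KiB,
 * i.e. exactly the containing cluster [64 KiB, 128 KiB).
 */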
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(child->bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        qemu_coroutine_enter(co);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
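
/*
 * Usage sketch (hypothetical caller, not part of the original file): format
 * drivers typically use this for metadata updates that must not be reordered
 * with later writes, e.g.:
 *
 *     ret = bdrv_pwrite_sync(bs->file, header_offset, &header,
 *                            sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 * where header_offset and header stand for whatever on-disk metadata the
 * caller maintains.
 */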
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}
/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);

    assert(is_power_of_2(alignment));
    head = offset & (alignment - 1);
    tail = (offset + count) & (alignment - 1);
    max_write_zeroes &= ~(alignment - 1);

    while (count > 0 && !ret) {
        int num = count;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. */
            num = MIN(count, alignment - head);
            head = 0;
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * all future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        count -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
                                int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int count, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, count, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum, file);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}

static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                   &data);
        qemu_coroutine_enter(co);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
typedef struct BdrvVmstateCo {
    BlockDriverState    *bs;
    QEMUIOVector        *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
                       : drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    return -ENOTSUP;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
        return data.ret;
    }
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, true);
}

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t offset;
            int bytes;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BdrvChild *child;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    QEMUBH *bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
    acb->child = child;
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_pdiscard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_pdiscard(bs, acb->req.offset, acb->req.bytes);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_pdiscard(BlockDriverState *bs, int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_pdiscard(bs, offset, count, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.bytes = count;
    co = qemu_coroutine_create(bdrv_aio_pdiscard_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;


static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);

    int current_gen = bs->write_gen;

    /* Wait until any previous flushes are completed */
    while (bs->flush_started_gen != bs->flushed_gen) {
        qemu_co_queue_wait(&bs->flush_queue);
    }

    bs->flush_started_gen = current_gen;

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    bs->flushed_gen = current_gen;
    qemu_co_queue_restart_all(&bs->flush_queue);

    tracked_request_end(&req);
    return ret;
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        qemu_coroutine_enter(co);
        while (flush_co.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return flush_co.ret;
}
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int count;
    int ret;
} DiscardCo;
static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
}

int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int count)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, count);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, so ignore any unaligned head or tail */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(is_power_of_2(align));
    head = MIN(count, -offset & (align - 1));
    if (head) {
        count -= head;
        offset += head;
    }
    count = QEMU_ALIGN_DOWN(count, align);
    if (!count) {
        return 0;
    }

    tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);

    while (count > 0) {
        int num = MIN(count, max_pdiscard);

        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        count -= num;
    }
    ret = 0;
out:
    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
                   req.bytes >> BDRV_SECTOR_BITS);
    tracked_request_end(&req);
    return ret;
}

int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .count = count,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        qemu_coroutine_enter(co);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        co.ret = -ENOTSUP;
        goto out;
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}

typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;
    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}

/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}

static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
                           unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}