/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "qemu/error-report.h"
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_group_config(bs, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}
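
/* Note: the throttled request queues are indexed by direction, which is why
 * the loops here and below iterate over exactly two queues:
 * throttled_reqs[0] holds queued reads and throttled_reqs[1] queued writes. */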
/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;
    bdrv_start_throttled_reqs(bs);
    throttle_group_unregister_bs(bs);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
    assert(!bs->io_limits_enabled);
    throttle_group_register_bs(bs, group);
    bs->io_limits_enabled = true;
}
void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
    /* this bs is not part of any group */
    if (!bs->throttle_state) {
        return;
    }

    /* this bs is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(bs), group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    bdrv_io_limits_disable(bs);
    bdrv_io_limits_enable(bs, group);
}
void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}
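
/* Net effect of the fallbacks above: a driver that only implements the
 * synchronous bdrv_read/bdrv_write callbacks still gets working coroutine and
 * AIO entry points, via bdrv_co_readv_em() -> bdrv_aio_readv_em() ->
 * bdrv_aio_rw_vector(), which emulates AIO with a bounce buffer and a bottom
 * half (see the emulation sections near the end of this file). */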
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
        bs->bl.max_iov = bs->file->bs->bl.max_iov;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
        bs->bl.max_iov =
            MIN_NON_ZERO(bs->bl.max_iov,
                         bs->backing->bs->bl.max_iov);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void bdrv_drain(BlockDriverState *bs)
{
    bool busy = true;

    bdrv_drain_recurse(bs);
    while (busy) {
        /* Keep iterating */
        bdrv_flush_io_queue(bs);
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;
            bs = NULL;

            aio_context_acquire(aio_context);
            while ((bs = bdrv_next(bs))) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    bdrv_flush_io_queue(bs);
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
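
/* Worked example: with align = 4096, a 100-byte request at offset 4100 gets
 * overlap_offset = 4096 and overlap_bytes = ROUND_UP(4200, 4096) - 4096 =
 * 4096, i.e. the overlap region is widened to whole alignment chunks. */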
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}
static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}
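
/* The NOT_DONE sentinel stored in rwco.ret above is what lets the synchronous
 * path spin in aio_poll() until bdrv_rw_co_entry() has written the real
 * return value into rwco.ret. */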
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}
/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
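
/* Typical use of the synchronous helpers above by format drivers (a sketch,
 * using a hypothetical header struct):
 *
 *     struct MyHeader header;
 *     ret = bdrv_pread(bs->file->bs, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */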
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        throttle_group_co_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
bdrv_co_do_readv(BlockDriverState
*bs
,
1015 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1016 BdrvRequestFlags flags
)
1018 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
1022 return bdrv_co_do_preadv(bs
, sector_num
<< BDRV_SECTOR_BITS
,
1023 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_NO_SERIALISING);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, with only a small unaligned head and tail.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    qemu_vfree(iov.iov_base);
    return ret;
}
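
/* Example of the alignment logic above: with write_zeroes_alignment = 8,
 * zeroing sectors [5, 25) is issued as [5, 8) (head, 3 sectors), [8, 24)
 * (aligned bulk, 16 sectors) and [24, 25) (tail, 1 sector). */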
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        throttle_group_co_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}
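
/* Rationale for the assert(!waited || !use_local_qiov) check above: once
 * caller data has been combined into local_qiov together with a head buffer
 * filled by an RMW read, the request must not wait for serialising requests
 * again, since a concurrent overlapping write completing in the meantime
 * could make the padded head stale. */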
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
int bdrv_flush_all(void)
{
    BlockDriverState *bs = NULL;
    int result = 0;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int file_pnum;

        ret2 = bdrv_co_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
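
/* The int64_t returned above packs the BDRV_BLOCK_* flags into the low bits;
 * when BDRV_BLOCK_OFFSET_VALID is set, the remaining bits carry the host
 * offset of the data, which is why callers can shift the return value by
 * BDRV_SECTOR_BITS to recurse into bs->file. */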
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum);
    data->done = true;
}
/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .pnum = pnum,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}
int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nb_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
    }

    return -ENOTSUP;
}
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_load_vmstate) {
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    }
    if (bs->file) {
        return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
    }

    return -ENOTSUP;
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}
static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}
static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
                            int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 >
            bs->bl.max_iov) {
            merge = 0;
        }

        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                                  reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    if (bs->blk) {
        block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
                              num_reqs - outidx - 1);
    }

    return outidx + 1;
}
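
/* Merge example: writes A = sectors [0, 8) and B = sectors [4, 12) sort to
 * A, B; since B.sector (4) <= A's end (8) they merge into one request for
 * [0, 12), built from A's first 4 sectors followed by all of B. */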
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb, true);
    }

    return 0;
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}
/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockAIOCBSync {
    BlockAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockAIOCBSync;

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBSync),
};
static void bdrv_aio_bh_cb(void *opaque)
{
    BlockAIOCBSync *acb = opaque;

    if (!acb->is_write && acb->ret >= 0) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);
}
static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov,
                                      int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque,
                                      int is_write)
{
    BlockAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_try_blockalign(bs, qiov->size);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (acb->bounce == NULL) {
        acb->ret = -ENOMEM;
    } else if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    QEMUBH *bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
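
/* The need_bh dance above guarantees the completion callback never runs
 * before bdrv_co_aio_rw_vector() returns: if the coroutine completed
 * synchronously, bdrv_co_complete() defers to bdrv_co_maybe_schedule_bh(),
 * which delivers the callback from a bottom half instead. */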
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}
void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;
static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}
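/*
 * Timeline of the emulation above (illustrative only):
 *
 *     coroutine                              AIO backend
 *     ---------                              -----------
 *     bdrv_aio_readv/writev(..., &co) ---->  request submitted
 *     qemu_coroutine_yield()                 ...
 *     (resumed, co.ret valid)         <----  bdrv_co_io_em_complete()
 *     return co.ret
 *
 * Passing &co, which lives on the coroutine stack, is safe because the
 * coroutine cannot return before the completion callback re-enters it.
 */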
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    tracked_request_end(&req);
    return ret;
}
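/*
 * Driver-side sketch (hypothetical format driver, illustrative only):
 * the two flush stages map to two optional callbacks, e.g.:
 *
 *     static int coroutine_fn myfmt_co_flush_to_os(BlockDriverState *bs)
 *     {
 *         // stage 1: push dirty metadata into the host page cache;
 *         // bdrv_co_flush() then recurses into bs->file for stage 2,
 *         // which forces the host disk write cache.
 *         return myfmt_write_dirty_metadata(bs);   // assumed helper
 *     }
 */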
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
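/*
 * The qemu_in_coroutine() fast path above is the generic shape for
 * running a coroutine_fn synchronously: calling aio_poll() from inside a
 * coroutine could deadlock, so a caller already in coroutine context runs
 * the entry point directly and lets bdrv_co_flush() yield.  bdrv_discard()
 * and bdrv_ioctl() below repeat it.  Sketch with hypothetical names:
 *
 *     MyCo data = { .bs = bs, .ret = NOT_DONE };      // assumed struct
 *     if (qemu_in_coroutine()) {
 *         my_co_entry(&data);                         // assumed entry fn
 *     } else {
 *         Coroutine *co = qemu_coroutine_create(my_co_entry);
 *         qemu_coroutine_enter(co, &data);
 *         while (data.ret == NOT_DONE) {
 *             aio_poll(bdrv_get_aio_context(bs), true);
 *         }
 *     }
 */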
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    BdrvTrackedRequest req;
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors,
                          BDRV_TRACKED_DISCARD);
    bdrv_set_dirty(bs, sector_num, nb_sectors);

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* Pass the clamped chunk size rather than the whole remaining
             * request, so the driver honours the alignment and size limits
             * computed above. */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    ret = 0;
out:
    tracked_request_end(&req);
    return ret;
}
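/*
 * Worked example of the alignment clamp above (illustrative numbers):
 * with bs->bl.discard_alignment == 8, sector_num == 10, nb_sectors == 20,
 * the first iteration starts with num == 20; 10 % 8 == 2, so num is
 * capped to 8 and then reduced by 2, giving num == 6.  Sectors 10..15
 * are discarded and the next iteration begins at sector_num == 16,
 * which is aligned, so the remaining 14 sectors go out in whole chunks.
 */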
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
typedef struct {
    CoroutineIOCompletion *co;
    QEMUBH *bh;
} BdrvIoctlCompletionData;

static void bdrv_ioctl_bh_cb(void *opaque)
{
    BdrvIoctlCompletionData *data = opaque;

    bdrv_co_io_em_complete(data->co, -ENOTSUP);
    qemu_bh_delete(data->bh);
    g_free(data);   /* allocated with g_new() in bdrv_co_do_ioctl() */
}
static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        /* The driver refused the request; complete with -ENOTSUP from a
         * bottom half so the coroutine is resumed only after it yields. */
        BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
        data->bh = aio_bh_new(bdrv_get_aio_context(bs),
                              bdrv_ioctl_bh_cb, data);
        data->co = &co;
        qemu_bh_schedule(data->bh);
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}
typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;
    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}
/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);

        qemu_coroutine_enter(co, &data);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}
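/*
 * Call sketch (illustrative only): SCSI passthrough uses this path to
 * forward SG_IO to a host block device, roughly:
 *
 *     struct sg_io_hdr io_hdr;
 *     ...
 *     ret = bdrv_ioctl(bs, SG_IO, &io_hdr);
 *
 * When the driver lacks bdrv_aio_ioctl, bdrv_co_do_ioctl() completes
 * the request with -ENOTSUP via the bottom-half path above.
 */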
static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}
BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
                           unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
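/*
 * Usage sketch (illustrative only): bounce buffers for O_DIRECT I/O
 * must honour the device's memory alignment, so callers prefer the
 * non-aborting variant and handle failure:
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);       // counterpart of qemu_memalign()
 */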
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
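/*
 * Sketch of a vector that passes this check (illustrative only):
 *
 *     QEMUIOVector qiov;
 *     void *buf = qemu_blockalign(bs, 4096);    // aligned base address
 *
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, 4096);         // aligned length
 *
 * A vector that fails either test forces the request path to copy
 * through a bounce buffer instead of using the memory directly.
 */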
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_plug) {
        drv->bdrv_io_plug(bs);
    } else if (bs->file) {
        bdrv_io_plug(bs->file->bs);
    }
}
void bdrv_io_unplug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_unplug) {
        drv->bdrv_io_unplug(bs);
    } else if (bs->file) {
        bdrv_io_unplug(bs->file->bs);
    }
}
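/*
 * Batching sketch (illustrative only): callers bracket a burst of
 * submissions so drivers that support it (e.g. linux-aio) can queue
 * the requests and submit them with a single syscall:
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
 *                         reqs[i].nb_sectors, cb, opaque);
 *     }
 *     bdrv_io_unplug(bs);    // kicks the queued requests
 */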
void bdrv_flush_io_queue(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_flush_io_queue) {
        drv->bdrv_flush_io_queue(bs);
    } else if (bs->file) {
        bdrv_flush_io_queue(bs->file->bs);
    }
    bdrv_start_throttled_reqs(bs);
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }
    bdrv_drain(bs);
}
void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    aio_enable_external(bdrv_get_aio_context(bs));
}
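/*
 * Drained-section sketch (illustrative only): quiesce a device while
 * performing work that must not race with new guest I/O:
 *
 *     bdrv_drained_begin(bs);   // disable external events, drain requests
 *     ... reconfigure the graph, take a snapshot, etc. ...
 *     bdrv_drained_end(bs);     // re-enable external events
 *
 * Sections nest via quiesce_counter: only the outermost pair actually
 * toggles aio_disable_external()/aio_enable_external().
 */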