/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "block/blockjob.h"
27 #include "block/block_int.h"
28 #include "block/throttle-groups.h"
29 #include "qemu/error-report.h"
31 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BdrvRequestFlags flags,
        BlockCompletionFunc *cb,
        void *opaque, bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs, ThrottleConfig *cfg)
{
    int i;

    throttle_group_config(bs, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}
/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;
    bdrv_start_throttled_reqs(bs);
    throttle_group_unregister_bs(bs);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
    assert(!bs->io_limits_enabled);
    throttle_group_register_bs(bs, group);
    bs->io_limits_enabled = true;
}

void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
    /* this bs is not part of any group */
    if (!bs->throttle_state) {
        return;
    }

    /* this bs is already part of the same group as the one we want */
    if (!g_strcmp0(throttle_group_get_name(bs), group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    bdrv_io_limits_disable(bs);
    bdrv_io_limits_enable(bs, group);
}
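/*
 * Illustrative ordering, not part of the original source: a caller that
 * wants to throttle a BlockDriverState would typically do
 *
 *     bdrv_io_limits_enable(bs, group);
 *     bdrv_set_io_limits(bs, &cfg);
 *
 * i.e. enable throttling (register with the throttle group) before setting
 * the limits, as the comment above bdrv_io_limits_enable() requires.
 */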
void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing_hd->bl.min_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
static bool bdrv_drain_one(BlockDriverState *bs)
{
    bool bs_busy;

    bdrv_flush_io_queue(bs);
    bdrv_start_throttled_reqs(bs);
    bs_busy = bdrv_requests_pending(bs);
    bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);

    return bs_busy;
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree
 *
 * See the warning in bdrv_drain_all(). This function can only be called if
 * you are sure nothing can generate I/O because you have op blockers
 * installed.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void bdrv_drain(BlockDriverState *bs)
{
    while (bdrv_drain_one(bs)) {
        /* Keep iterating */
    }
}
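/*
 * Illustrative caller pattern, not part of the original source: since the
 * caller must hold the BlockDriverState's AioContext (unlike
 * bdrv_drain_all()), a typical use looks like
 *
 *     AioContext *ctx = bdrv_get_aio_context(bs);
 *     aio_context_acquire(ctx);
 *     bdrv_drain(bs);
 *     aio_context_release(ctx);
 *
 * with op blockers already installed so no new I/O can be generated while
 * draining.
 */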
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        aio_context_release(aio_context);
    }

    while (busy) {
        busy = false;
        bs = NULL;

        while ((bs = bdrv_next(bs))) {
            AioContext *aio_context = bdrv_get_aio_context(bs);

            aio_context_acquire(aio_context);
            busy |= bdrv_drain_one(bs);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs             = bs,
        .offset         = offset,
        .bytes          = bytes,
        .is_write       = is_write,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
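/*
 * Worked example, not part of the original source: for req->offset = 4097,
 * req->bytes = 100 and align = 4096, overlap_offset = 4097 & ~4095 = 4096
 * and overlap_bytes = ROUND_UP(4197, 4096) - 4096 = 8192 - 4096 = 4096,
 * i.e. the serialising request is widened to cover the whole aligned chunk
 * it touches.
 */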
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
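/*
 * Worked example, not part of the original source: with a 64 KiB cluster
 * size, c = 65536 / 512 = 128 sectors.  Rounding sectors [100, 150) gives
 * *cluster_sector_num = QEMU_ALIGN_DOWN(100, 128) = 0 and
 * *cluster_nb_sectors = QEMU_ALIGN_UP(100 - 0 + 50, 128) = 256, so the
 * cluster-aligned region [0, 256) fully covers the original request.
 */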
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }

    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
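/*
 * Illustrative example, not part of the original source: a tracked request
 * with overlap_offset = 4096 and overlap_bytes = 4096 covers [4096, 8192),
 * so a new request at offset = 8192 does not overlap (first test fires),
 * while a new request at offset = 8191 with bytes = 2 does overlap.
 */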
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
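/*
 * Illustrative scenario, not part of the original source: if serialising
 * requests A and B overlap and A is already blocked in qemu_co_queue_wait()
 * on B (so A->waiting_for == B), then when B reaches this loop it sees
 * A->waiting_for set and skips waiting in turn, avoiding an A<->B deadlock.
 */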
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}
/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
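/*
 * Illustrative usage, not part of the original source: zeroing the first
 * megabyte of an image while allowing the driver to unmap it would be
 *
 *     ret = bdrv_write_zeroes(bs, 0, 2048, BDRV_REQ_MAY_UNMAP);
 *
 * (2048 sectors of 512 bytes), with the flag passed through to the driver
 * as described for bdrv_make_zero() below.
 */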
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
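/*
 * Illustrative usage, not part of the original source: reading a 512-byte
 * header from the start of the image would be
 *
 *     ret = bdrv_pread(bs, 0, buf, 512);
 *
 * which returns the byte count (512) on success or a negative errno,
 * following the conventions of the helpers above.
 */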
738 static int coroutine_fn
bdrv_co_do_copy_on_readv(BlockDriverState
*bs
,
739 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
741 /* Perform I/O through a temporary buffer so that users who scribble over
742 * their read buffer while the operation is in progress do not end up
743 * modifying the image file. This is critical for zero-copy guest I/O
744 * where anything might happen inside guest memory.
748 BlockDriver
*drv
= bs
->drv
;
750 QEMUIOVector bounce_qiov
;
751 int64_t cluster_sector_num
;
752 int cluster_nb_sectors
;
756 /* Cover entire cluster so no additional backing file I/O is required when
757 * allocating cluster in the image file.
759 bdrv_round_to_clusters(bs
, sector_num
, nb_sectors
,
760 &cluster_sector_num
, &cluster_nb_sectors
);
762 trace_bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
,
763 cluster_sector_num
, cluster_nb_sectors
);
765 iov
.iov_len
= cluster_nb_sectors
* BDRV_SECTOR_SIZE
;
766 iov
.iov_base
= bounce_buffer
= qemu_try_blockalign(bs
, iov
.iov_len
);
767 if (bounce_buffer
== NULL
) {
772 qemu_iovec_init_external(&bounce_qiov
, &iov
, 1);
774 ret
= drv
->bdrv_co_readv(bs
, cluster_sector_num
, cluster_nb_sectors
,
780 if (drv
->bdrv_co_write_zeroes
&&
781 buffer_is_zero(bounce_buffer
, iov
.iov_len
)) {
782 ret
= bdrv_co_do_write_zeroes(bs
, cluster_sector_num
,
783 cluster_nb_sectors
, 0);
785 /* This does not change the data on the disk, it is not necessary
786 * to flush even in cache=writethrough mode.
788 ret
= drv
->bdrv_co_writev(bs
, cluster_sector_num
, cluster_nb_sectors
,
793 /* It might be okay to ignore write errors for guest requests. If this
794 * is a deliberate copy-on-read then we don't want to ignore the error.
795 * Simply report it in all cases.
800 skip_bytes
= (sector_num
- cluster_sector_num
) * BDRV_SECTOR_SIZE
;
801 qemu_iovec_from_buf(qiov
, 0, bounce_buffer
+ skip_bytes
,
802 nb_sectors
* BDRV_SECTOR_SIZE
);
805 qemu_vfree(bounce_buffer
);
810 * Forwards an already correctly aligned request to the BlockDriver. This
811 * handles copy on read and zeroing after EOF; any other features must be
812 * implemented by the caller.
814 static int coroutine_fn
bdrv_aligned_preadv(BlockDriverState
*bs
,
815 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
816 int64_t align
, QEMUIOVector
*qiov
, int flags
)
818 BlockDriver
*drv
= bs
->drv
;
821 int64_t sector_num
= offset
>> BDRV_SECTOR_BITS
;
822 unsigned int nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
824 assert((offset
& (BDRV_SECTOR_SIZE
- 1)) == 0);
825 assert((bytes
& (BDRV_SECTOR_SIZE
- 1)) == 0);
826 assert(!qiov
|| bytes
== qiov
->size
);
828 /* Handle Copy on Read and associated serialisation */
829 if (flags
& BDRV_REQ_COPY_ON_READ
) {
830 /* If we touch the same cluster it counts as an overlap. This
831 * guarantees that allocating writes will be serialized and not race
832 * with each other for the same cluster. For example, in copy-on-read
833 * it ensures that the CoR read and write operations are atomic and
834 * guest writes cannot interleave between them. */
835 mark_request_serialising(req
, bdrv_get_cluster_size(bs
));
838 wait_serialising_requests(req
);
840 if (flags
& BDRV_REQ_COPY_ON_READ
) {
843 ret
= bdrv_is_allocated(bs
, sector_num
, nb_sectors
, &pnum
);
848 if (!ret
|| pnum
!= nb_sectors
) {
849 ret
= bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
, qiov
);
854 /* Forward the request to the BlockDriver */
855 if (!bs
->zero_beyond_eof
) {
856 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
858 /* Read zeros after EOF */
859 int64_t total_sectors
, max_nb_sectors
;
861 total_sectors
= bdrv_nb_sectors(bs
);
862 if (total_sectors
< 0) {
867 max_nb_sectors
= ROUND_UP(MAX(0, total_sectors
- sector_num
),
868 align
>> BDRV_SECTOR_BITS
);
869 if (nb_sectors
< max_nb_sectors
) {
870 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
871 } else if (max_nb_sectors
> 0) {
872 QEMUIOVector local_qiov
;
874 qemu_iovec_init(&local_qiov
, qiov
->niov
);
875 qemu_iovec_concat(&local_qiov
, qiov
, 0,
876 max_nb_sectors
* BDRV_SECTOR_SIZE
);
878 ret
= drv
->bdrv_co_readv(bs
, sector_num
, max_nb_sectors
,
881 qemu_iovec_destroy(&local_qiov
);
886 /* Reading beyond end of file is supposed to produce zeroes */
887 if (ret
== 0 && total_sectors
< sector_num
+ nb_sectors
) {
888 uint64_t offset
= MAX(0, total_sectors
- sector_num
);
889 uint64_t bytes
= (sector_num
+ nb_sectors
- offset
) *
891 qemu_iovec_memset(qiov
, offset
* BDRV_SECTOR_SIZE
, 0, bytes
);
900 * Handle a read request in coroutine context
902 static int coroutine_fn
bdrv_co_do_preadv(BlockDriverState
*bs
,
903 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
904 BdrvRequestFlags flags
)
906 BlockDriver
*drv
= bs
->drv
;
907 BdrvTrackedRequest req
;
909 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
910 uint64_t align
= MAX(BDRV_SECTOR_SIZE
, bs
->request_alignment
);
911 uint8_t *head_buf
= NULL
;
912 uint8_t *tail_buf
= NULL
;
913 QEMUIOVector local_qiov
;
914 bool use_local_qiov
= false;
921 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
926 if (bs
->copy_on_read
) {
927 flags
|= BDRV_REQ_COPY_ON_READ
;
930 /* throttling disk I/O */
931 if (bs
->io_limits_enabled
) {
932 throttle_group_co_io_limits_intercept(bs
, bytes
, false);
935 /* Align read if necessary by padding qiov */
936 if (offset
& (align
- 1)) {
937 head_buf
= qemu_blockalign(bs
, align
);
938 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 2);
939 qemu_iovec_add(&local_qiov
, head_buf
, offset
& (align
- 1));
940 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
941 use_local_qiov
= true;
943 bytes
+= offset
& (align
- 1);
944 offset
= offset
& ~(align
- 1);
947 if ((offset
+ bytes
) & (align
- 1)) {
948 if (!use_local_qiov
) {
949 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 1);
950 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
951 use_local_qiov
= true;
953 tail_buf
= qemu_blockalign(bs
, align
);
954 qemu_iovec_add(&local_qiov
, tail_buf
,
955 align
- ((offset
+ bytes
) & (align
- 1)));
957 bytes
= ROUND_UP(bytes
, align
);
960 tracked_request_begin(&req
, bs
, offset
, bytes
, false);
961 ret
= bdrv_aligned_preadv(bs
, &req
, offset
, bytes
, align
,
962 use_local_qiov
? &local_qiov
: qiov
,
964 tracked_request_end(&req
);
966 if (use_local_qiov
) {
967 qemu_iovec_destroy(&local_qiov
);
968 qemu_vfree(head_buf
);
969 qemu_vfree(tail_buf
);
975 static int coroutine_fn
bdrv_co_do_readv(BlockDriverState
*bs
,
976 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
977 BdrvRequestFlags flags
)
979 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
983 return bdrv_co_do_preadv(bs
, sector_num
<< BDRV_SECTOR_BITS
,
984 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
987 int coroutine_fn
bdrv_co_readv(BlockDriverState
*bs
, int64_t sector_num
,
988 int nb_sectors
, QEMUIOVector
*qiov
)
990 trace_bdrv_co_readv(bs
, sector_num
, nb_sectors
);
992 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
, 0);
995 int coroutine_fn
bdrv_co_copy_on_readv(BlockDriverState
*bs
,
996 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
998 trace_bdrv_co_copy_on_readv(bs
, sector_num
, nb_sectors
);
1000 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
,
1001 BDRV_REQ_COPY_ON_READ
);
1004 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1006 static int coroutine_fn
bdrv_co_do_write_zeroes(BlockDriverState
*bs
,
1007 int64_t sector_num
, int nb_sectors
, BdrvRequestFlags flags
)
1009 BlockDriver
*drv
= bs
->drv
;
1011 struct iovec iov
= {0};
1014 int max_write_zeroes
= MIN_NON_ZERO(bs
->bl
.max_write_zeroes
,
1015 BDRV_REQUEST_MAX_SECTORS
);
1017 while (nb_sectors
> 0 && !ret
) {
1018 int num
= nb_sectors
;
1020 /* Align request. Block drivers can expect the "bulk" of the request
1023 if (bs
->bl
.write_zeroes_alignment
1024 && num
> bs
->bl
.write_zeroes_alignment
) {
1025 if (sector_num
% bs
->bl
.write_zeroes_alignment
!= 0) {
1026 /* Make a small request up to the first aligned sector. */
1027 num
= bs
->bl
.write_zeroes_alignment
;
1028 num
-= sector_num
% bs
->bl
.write_zeroes_alignment
;
1029 } else if ((sector_num
+ num
) % bs
->bl
.write_zeroes_alignment
!= 0) {
1030 /* Shorten the request to the last aligned sector. num cannot
1031 * underflow because num > bs->bl.write_zeroes_alignment.
1033 num
-= (sector_num
+ num
) % bs
->bl
.write_zeroes_alignment
;
1037 /* limit request size */
1038 if (num
> max_write_zeroes
) {
1039 num
= max_write_zeroes
;
1043 /* First try the efficient write zeroes operation */
1044 if (drv
->bdrv_co_write_zeroes
) {
1045 ret
= drv
->bdrv_co_write_zeroes(bs
, sector_num
, num
, flags
);
1048 if (ret
== -ENOTSUP
) {
1049 /* Fall back to bounce buffer if write zeroes is unsupported */
1050 int max_xfer_len
= MIN_NON_ZERO(bs
->bl
.max_transfer_length
,
1051 MAX_WRITE_ZEROES_BOUNCE_BUFFER
);
1052 num
= MIN(num
, max_xfer_len
);
1053 iov
.iov_len
= num
* BDRV_SECTOR_SIZE
;
1054 if (iov
.iov_base
== NULL
) {
1055 iov
.iov_base
= qemu_try_blockalign(bs
, num
* BDRV_SECTOR_SIZE
);
1056 if (iov
.iov_base
== NULL
) {
1060 memset(iov
.iov_base
, 0, num
* BDRV_SECTOR_SIZE
);
1062 qemu_iovec_init_external(&qiov
, &iov
, 1);
1064 ret
= drv
->bdrv_co_writev(bs
, sector_num
, num
, &qiov
);
/* Keep bounce buffer around if it is big enough for all
 * future requests.
1069 if (num
< max_xfer_len
) {
1070 qemu_vfree(iov
.iov_base
);
1071 iov
.iov_base
= NULL
;
1080 qemu_vfree(iov
.iov_base
);
1085 * Forwards an already correctly aligned write request to the BlockDriver.
1087 static int coroutine_fn
bdrv_aligned_pwritev(BlockDriverState
*bs
,
1088 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
1089 QEMUIOVector
*qiov
, int flags
)
1091 BlockDriver
*drv
= bs
->drv
;
1095 int64_t sector_num
= offset
>> BDRV_SECTOR_BITS
;
1096 unsigned int nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
1098 assert((offset
& (BDRV_SECTOR_SIZE
- 1)) == 0);
1099 assert((bytes
& (BDRV_SECTOR_SIZE
- 1)) == 0);
1100 assert(!qiov
|| bytes
== qiov
->size
);
1102 waited
= wait_serialising_requests(req
);
1103 assert(!waited
|| !req
->serialising
);
1104 assert(req
->overlap_offset
<= offset
);
1105 assert(offset
+ bytes
<= req
->overlap_offset
+ req
->overlap_bytes
);
1107 ret
= notifier_with_return_list_notify(&bs
->before_write_notifiers
, req
);
1109 if (!ret
&& bs
->detect_zeroes
!= BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF
&&
1110 !(flags
& BDRV_REQ_ZERO_WRITE
) && drv
->bdrv_co_write_zeroes
&&
1111 qemu_iovec_is_zero(qiov
)) {
1112 flags
|= BDRV_REQ_ZERO_WRITE
;
1113 if (bs
->detect_zeroes
== BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP
) {
1114 flags
|= BDRV_REQ_MAY_UNMAP
;
1119 /* Do nothing, write notifier decided to fail this request */
1120 } else if (flags
& BDRV_REQ_ZERO_WRITE
) {
1121 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_ZERO
);
1122 ret
= bdrv_co_do_write_zeroes(bs
, sector_num
, nb_sectors
, flags
);
1124 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV
);
1125 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, qiov
);
1127 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_DONE
);
1129 if (ret
== 0 && !bs
->enable_write_cache
) {
1130 ret
= bdrv_co_flush(bs
);
1133 bdrv_set_dirty(bs
, sector_num
, nb_sectors
);
1135 block_acct_highest_sector(&bs
->stats
, sector_num
, nb_sectors
);
1138 bs
->total_sectors
= MAX(bs
->total_sectors
, sector_num
+ nb_sectors
);
1144 static int coroutine_fn
bdrv_co_do_zero_pwritev(BlockDriverState
*bs
,
1147 BdrvRequestFlags flags
,
1148 BdrvTrackedRequest
*req
)
1150 uint8_t *buf
= NULL
;
1151 QEMUIOVector local_qiov
;
1153 uint64_t align
= MAX(BDRV_SECTOR_SIZE
, bs
->request_alignment
);
1154 unsigned int head_padding_bytes
, tail_padding_bytes
;
1157 head_padding_bytes
= offset
& (align
- 1);
1158 tail_padding_bytes
= align
- ((offset
+ bytes
) & (align
- 1));
1161 assert(flags
& BDRV_REQ_ZERO_WRITE
);
1162 if (head_padding_bytes
|| tail_padding_bytes
) {
1163 buf
= qemu_blockalign(bs
, align
);
1164 iov
= (struct iovec
) {
1168 qemu_iovec_init_external(&local_qiov
, &iov
, 1);
1170 if (head_padding_bytes
) {
1171 uint64_t zero_bytes
= MIN(bytes
, align
- head_padding_bytes
);
1173 /* RMW the unaligned part before head. */
1174 mark_request_serialising(req
, align
);
1175 wait_serialising_requests(req
);
1176 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_HEAD
);
1177 ret
= bdrv_aligned_preadv(bs
, req
, offset
& ~(align
- 1), align
,
1178 align
, &local_qiov
, 0);
1182 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_AFTER_HEAD
);
1184 memset(buf
+ head_padding_bytes
, 0, zero_bytes
);
1185 ret
= bdrv_aligned_pwritev(bs
, req
, offset
& ~(align
- 1), align
,
1187 flags
& ~BDRV_REQ_ZERO_WRITE
);
1191 offset
+= zero_bytes
;
1192 bytes
-= zero_bytes
;
1195 assert(!bytes
|| (offset
& (align
- 1)) == 0);
1196 if (bytes
>= align
) {
1197 /* Write the aligned part in the middle. */
1198 uint64_t aligned_bytes
= bytes
& ~(align
- 1);
1199 ret
= bdrv_aligned_pwritev(bs
, req
, offset
, aligned_bytes
,
1204 bytes
-= aligned_bytes
;
1205 offset
+= aligned_bytes
;
1208 assert(!bytes
|| (offset
& (align
- 1)) == 0);
1210 assert(align
== tail_padding_bytes
+ bytes
);
1211 /* RMW the unaligned part after tail. */
1212 mark_request_serialising(req
, align
);
1213 wait_serialising_requests(req
);
1214 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1215 ret
= bdrv_aligned_preadv(bs
, req
, offset
, align
,
1216 align
, &local_qiov
, 0);
1220 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1222 memset(buf
, 0, bytes
);
1223 ret
= bdrv_aligned_pwritev(bs
, req
, offset
, align
,
1224 &local_qiov
, flags
& ~BDRV_REQ_ZERO_WRITE
);
1233 * Handle a write request in coroutine context
1235 static int coroutine_fn
bdrv_co_do_pwritev(BlockDriverState
*bs
,
1236 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
1237 BdrvRequestFlags flags
)
1239 BdrvTrackedRequest req
;
1240 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1241 uint64_t align
= MAX(BDRV_SECTOR_SIZE
, bs
->request_alignment
);
1242 uint8_t *head_buf
= NULL
;
1243 uint8_t *tail_buf
= NULL
;
1244 QEMUIOVector local_qiov
;
1245 bool use_local_qiov
= false;
1251 if (bs
->read_only
) {
1255 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
1260 /* throttling disk I/O */
1261 if (bs
->io_limits_enabled
) {
1262 throttle_group_co_io_limits_intercept(bs
, bytes
, true);
1266 * Align write if necessary by performing a read-modify-write cycle.
1267 * Pad qiov with the read parts and be sure to have a tracked request not
1268 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1270 tracked_request_begin(&req
, bs
, offset
, bytes
, true);
1273 ret
= bdrv_co_do_zero_pwritev(bs
, offset
, bytes
, flags
, &req
);
1277 if (offset
& (align
- 1)) {
1278 QEMUIOVector head_qiov
;
1279 struct iovec head_iov
;
1281 mark_request_serialising(&req
, align
);
1282 wait_serialising_requests(&req
);
1284 head_buf
= qemu_blockalign(bs
, align
);
1285 head_iov
= (struct iovec
) {
1286 .iov_base
= head_buf
,
1289 qemu_iovec_init_external(&head_qiov
, &head_iov
, 1);
1291 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_HEAD
);
1292 ret
= bdrv_aligned_preadv(bs
, &req
, offset
& ~(align
- 1), align
,
1293 align
, &head_qiov
, 0);
1297 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_AFTER_HEAD
);
1299 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 2);
1300 qemu_iovec_add(&local_qiov
, head_buf
, offset
& (align
- 1));
1301 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1302 use_local_qiov
= true;
1304 bytes
+= offset
& (align
- 1);
1305 offset
= offset
& ~(align
- 1);
1308 if ((offset
+ bytes
) & (align
- 1)) {
1309 QEMUIOVector tail_qiov
;
1310 struct iovec tail_iov
;
1314 mark_request_serialising(&req
, align
);
1315 waited
= wait_serialising_requests(&req
);
1316 assert(!waited
|| !use_local_qiov
);
1318 tail_buf
= qemu_blockalign(bs
, align
);
1319 tail_iov
= (struct iovec
) {
1320 .iov_base
= tail_buf
,
1323 qemu_iovec_init_external(&tail_qiov
, &tail_iov
, 1);
1325 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1326 ret
= bdrv_aligned_preadv(bs
, &req
, (offset
+ bytes
) & ~(align
- 1), align
,
1327 align
, &tail_qiov
, 0);
1331 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1333 if (!use_local_qiov
) {
1334 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 1);
1335 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1336 use_local_qiov
= true;
1339 tail_bytes
= (offset
+ bytes
) & (align
- 1);
1340 qemu_iovec_add(&local_qiov
, tail_buf
+ tail_bytes
, align
- tail_bytes
);
1342 bytes
= ROUND_UP(bytes
, align
);
1345 ret
= bdrv_aligned_pwritev(bs
, &req
, offset
, bytes
,
1346 use_local_qiov
? &local_qiov
: qiov
,
1351 if (use_local_qiov
) {
1352 qemu_iovec_destroy(&local_qiov
);
1354 qemu_vfree(head_buf
);
1355 qemu_vfree(tail_buf
);
1357 tracked_request_end(&req
);
1361 static int coroutine_fn
bdrv_co_do_writev(BlockDriverState
*bs
,
1362 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1363 BdrvRequestFlags flags
)
1365 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
1369 return bdrv_co_do_pwritev(bs
, sector_num
<< BDRV_SECTOR_BITS
,
1370 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
1373 int coroutine_fn
bdrv_co_writev(BlockDriverState
*bs
, int64_t sector_num
,
1374 int nb_sectors
, QEMUIOVector
*qiov
)
1376 trace_bdrv_co_writev(bs
, sector_num
, nb_sectors
);
1378 return bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, qiov
, 0);
1381 int coroutine_fn
bdrv_co_write_zeroes(BlockDriverState
*bs
,
1382 int64_t sector_num
, int nb_sectors
,
1383 BdrvRequestFlags flags
)
1385 trace_bdrv_co_write_zeroes(bs
, sector_num
, nb_sectors
, flags
);
1387 if (!(bs
->open_flags
& BDRV_O_UNMAP
)) {
1388 flags
&= ~BDRV_REQ_MAY_UNMAP
;
1391 return bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, NULL
,
1392 BDRV_REQ_ZERO_WRITE
| flags
);
1395 int bdrv_flush_all(void)
1397 BlockDriverState
*bs
= NULL
;
1400 while ((bs
= bdrv_next(bs
))) {
1401 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
1404 aio_context_acquire(aio_context
);
1405 ret
= bdrv_flush(bs
);
1406 if (ret
< 0 && !result
) {
1409 aio_context_release(aio_context
);
1415 typedef struct BdrvCoGetBlockStatusData
{
1416 BlockDriverState
*bs
;
1417 BlockDriverState
*base
;
1423 } BdrvCoGetBlockStatusData
;
1426 * Returns the allocation status of the specified sectors.
1427 * Drivers not implementing the functionality are assumed to not support
1428 * backing files, hence all their sectors are reported as allocated.
1430 * If 'sector_num' is beyond the end of the disk image the return value is 0
1431 * and 'pnum' is set to 0.
1433 * 'pnum' is set to the number of sectors (including and immediately following
1434 * the specified sector) that are known to be in the same
1435 * allocated/unallocated state.
1437 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
1438 * beyond the end of the disk image it will be clamped.
1440 static int64_t coroutine_fn
bdrv_co_get_block_status(BlockDriverState
*bs
,
1442 int nb_sectors
, int *pnum
)
1444 int64_t total_sectors
;
1448 total_sectors
= bdrv_nb_sectors(bs
);
1449 if (total_sectors
< 0) {
1450 return total_sectors
;
1453 if (sector_num
>= total_sectors
) {
1458 n
= total_sectors
- sector_num
;
1459 if (n
< nb_sectors
) {
1463 if (!bs
->drv
->bdrv_co_get_block_status
) {
1465 ret
= BDRV_BLOCK_DATA
| BDRV_BLOCK_ALLOCATED
;
1466 if (bs
->drv
->protocol_name
) {
1467 ret
|= BDRV_BLOCK_OFFSET_VALID
| (sector_num
* BDRV_SECTOR_SIZE
);
1472 ret
= bs
->drv
->bdrv_co_get_block_status(bs
, sector_num
, nb_sectors
, pnum
);
1478 if (ret
& BDRV_BLOCK_RAW
) {
1479 assert(ret
& BDRV_BLOCK_OFFSET_VALID
);
1480 return bdrv_get_block_status(bs
->file
, ret
>> BDRV_SECTOR_BITS
,
1484 if (ret
& (BDRV_BLOCK_DATA
| BDRV_BLOCK_ZERO
)) {
1485 ret
|= BDRV_BLOCK_ALLOCATED
;
1487 if (bdrv_unallocated_blocks_are_zero(bs
)) {
1488 ret
|= BDRV_BLOCK_ZERO
;
1489 } else if (bs
->backing_hd
) {
1490 BlockDriverState
*bs2
= bs
->backing_hd
;
1491 int64_t nb_sectors2
= bdrv_nb_sectors(bs2
);
1492 if (nb_sectors2
>= 0 && sector_num
>= nb_sectors2
) {
1493 ret
|= BDRV_BLOCK_ZERO
;
1499 (ret
& BDRV_BLOCK_DATA
) && !(ret
& BDRV_BLOCK_ZERO
) &&
1500 (ret
& BDRV_BLOCK_OFFSET_VALID
)) {
1503 ret2
= bdrv_co_get_block_status(bs
->file
, ret
>> BDRV_SECTOR_BITS
,
1506 /* Ignore errors. This is just providing extra information, it
1507 * is useful but not necessary.
1510 /* !file_pnum indicates an offset at or beyond the EOF; it is
1511 * perfectly valid for the format block driver to point to such
1512 * offsets, so catch it and mark everything as zero */
1513 ret
|= BDRV_BLOCK_ZERO
;
1515 /* Limit request to the range reported by the protocol driver */
1517 ret
|= (ret2
& BDRV_BLOCK_ZERO
);
1525 /* Coroutine wrapper for bdrv_get_block_status() */
1526 static void coroutine_fn
bdrv_get_block_status_co_entry(void *opaque
)
1528 BdrvCoGetBlockStatusData
*data
= opaque
;
1529 BlockDriverState
*bs
= data
->bs
;
1531 data
->ret
= bdrv_co_get_block_status(bs
, data
->sector_num
, data
->nb_sectors
,
1537 * Synchronous wrapper around bdrv_co_get_block_status().
1539 * See bdrv_co_get_block_status() for details.
1541 int64_t bdrv_get_block_status(BlockDriverState
*bs
, int64_t sector_num
,
1542 int nb_sectors
, int *pnum
)
1545 BdrvCoGetBlockStatusData data
= {
1547 .sector_num
= sector_num
,
1548 .nb_sectors
= nb_sectors
,
1553 if (qemu_in_coroutine()) {
1554 /* Fast-path if already in coroutine context */
1555 bdrv_get_block_status_co_entry(&data
);
1557 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
1559 co
= qemu_coroutine_create(bdrv_get_block_status_co_entry
);
1560 qemu_coroutine_enter(co
, &data
);
1561 while (!data
.done
) {
1562 aio_poll(aio_context
, true);
1568 int coroutine_fn
bdrv_is_allocated(BlockDriverState
*bs
, int64_t sector_num
,
1569 int nb_sectors
, int *pnum
)
1571 int64_t ret
= bdrv_get_block_status(bs
, sector_num
, nb_sectors
, pnum
);
1575 return !!(ret
& BDRV_BLOCK_ALLOCATED
);
1579 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1581 * Return true if the given sector is allocated in any image between
1582 * BASE and TOP (inclusive). BASE can be NULL to check if the given
1583 * sector is allocated in any image of the chain. Return false otherwise.
1585 * 'pnum' is set to the number of sectors (including and immediately following
1586 * the specified sector) that are known to be in the same
1587 * allocated/unallocated state.
1590 int bdrv_is_allocated_above(BlockDriverState
*top
,
1591 BlockDriverState
*base
,
1593 int nb_sectors
, int *pnum
)
1595 BlockDriverState
*intermediate
;
1596 int ret
, n
= nb_sectors
;
1599 while (intermediate
&& intermediate
!= base
) {
1601 ret
= bdrv_is_allocated(intermediate
, sector_num
, nb_sectors
,
1611 * [sector_num, nb_sectors] is unallocated on top but intermediate
1614 * [sector_num+x, nr_sectors] allocated.
1616 if (n
> pnum_inter
&&
1617 (intermediate
== top
||
1618 sector_num
+ pnum_inter
< intermediate
->total_sectors
)) {
1622 intermediate
= intermediate
->backing_hd
;
1629 int bdrv_write_compressed(BlockDriverState
*bs
, int64_t sector_num
,
1630 const uint8_t *buf
, int nb_sectors
)
1632 BlockDriver
*drv
= bs
->drv
;
1638 if (!drv
->bdrv_write_compressed
) {
1641 ret
= bdrv_check_request(bs
, sector_num
, nb_sectors
);
1646 assert(QLIST_EMPTY(&bs
->dirty_bitmaps
));
1648 return drv
->bdrv_write_compressed(bs
, sector_num
, buf
, nb_sectors
);
1651 int bdrv_save_vmstate(BlockDriverState
*bs
, const uint8_t *buf
,
1652 int64_t pos
, int size
)
1655 struct iovec iov
= {
1656 .iov_base
= (void *) buf
,
1660 qemu_iovec_init_external(&qiov
, &iov
, 1);
1661 return bdrv_writev_vmstate(bs
, &qiov
, pos
);
1664 int bdrv_writev_vmstate(BlockDriverState
*bs
, QEMUIOVector
*qiov
, int64_t pos
)
1666 BlockDriver
*drv
= bs
->drv
;
1670 } else if (drv
->bdrv_save_vmstate
) {
1671 return drv
->bdrv_save_vmstate(bs
, qiov
, pos
);
1672 } else if (bs
->file
) {
1673 return bdrv_writev_vmstate(bs
->file
, qiov
, pos
);
1679 int bdrv_load_vmstate(BlockDriverState
*bs
, uint8_t *buf
,
1680 int64_t pos
, int size
)
1682 BlockDriver
*drv
= bs
->drv
;
1685 if (drv
->bdrv_load_vmstate
)
1686 return drv
->bdrv_load_vmstate(bs
, buf
, pos
, size
);
1688 return bdrv_load_vmstate(bs
->file
, buf
, pos
, size
);
1692 /**************************************************************/
1695 BlockAIOCB
*bdrv_aio_readv(BlockDriverState
*bs
, int64_t sector_num
,
1696 QEMUIOVector
*qiov
, int nb_sectors
,
1697 BlockCompletionFunc
*cb
, void *opaque
)
1699 trace_bdrv_aio_readv(bs
, sector_num
, nb_sectors
, opaque
);
1701 return bdrv_co_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, 0,
1705 BlockAIOCB
*bdrv_aio_writev(BlockDriverState
*bs
, int64_t sector_num
,
1706 QEMUIOVector
*qiov
, int nb_sectors
,
1707 BlockCompletionFunc
*cb
, void *opaque
)
1709 trace_bdrv_aio_writev(bs
, sector_num
, nb_sectors
, opaque
);
1711 return bdrv_co_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, 0,
1715 BlockAIOCB
*bdrv_aio_write_zeroes(BlockDriverState
*bs
,
1716 int64_t sector_num
, int nb_sectors
, BdrvRequestFlags flags
,
1717 BlockCompletionFunc
*cb
, void *opaque
)
1719 trace_bdrv_aio_write_zeroes(bs
, sector_num
, nb_sectors
, flags
, opaque
);
1721 return bdrv_co_aio_rw_vector(bs
, sector_num
, NULL
, nb_sectors
,
1722 BDRV_REQ_ZERO_WRITE
| flags
,
1727 typedef struct MultiwriteCB
{
1732 BlockCompletionFunc
*cb
;
1734 QEMUIOVector
*free_qiov
;
1738 static void multiwrite_user_cb(MultiwriteCB
*mcb
)
1742 for (i
= 0; i
< mcb
->num_callbacks
; i
++) {
1743 mcb
->callbacks
[i
].cb(mcb
->callbacks
[i
].opaque
, mcb
->error
);
1744 if (mcb
->callbacks
[i
].free_qiov
) {
1745 qemu_iovec_destroy(mcb
->callbacks
[i
].free_qiov
);
1747 g_free(mcb
->callbacks
[i
].free_qiov
);
1751 static void multiwrite_cb(void *opaque
, int ret
)
1753 MultiwriteCB
*mcb
= opaque
;
1755 trace_multiwrite_cb(mcb
, ret
);
1757 if (ret
< 0 && !mcb
->error
) {
1761 mcb
->num_requests
--;
1762 if (mcb
->num_requests
== 0) {
1763 multiwrite_user_cb(mcb
);
1768 static int multiwrite_req_compare(const void *a
, const void *b
)
1770 const BlockRequest
*req1
= a
, *req2
= b
;
1773 * Note that we can't simply subtract req2->sector from req1->sector
1774 * here as that could overflow the return value.
1776 if (req1
->sector
> req2
->sector
) {
1778 } else if (req1
->sector
< req2
->sector
) {
1786 * Takes a bunch of requests and tries to merge them. Returns the number of
1787 * requests that remain after merging.
1789 static int multiwrite_merge(BlockDriverState
*bs
, BlockRequest
*reqs
,
1790 int num_reqs
, MultiwriteCB
*mcb
)
1794 // Sort requests by start sector
1795 qsort(reqs
, num_reqs
, sizeof(*reqs
), &multiwrite_req_compare
);
1797 // Check if adjacent requests touch the same clusters. If so, combine them,
1798 // filling up gaps with zero sectors.
1800 for (i
= 1; i
< num_reqs
; i
++) {
1802 int64_t oldreq_last
= reqs
[outidx
].sector
+ reqs
[outidx
].nb_sectors
;
1804 // Handle exactly sequential writes and overlapping writes.
1805 if (reqs
[i
].sector
<= oldreq_last
) {
1809 if (reqs
[outidx
].qiov
->niov
+ reqs
[i
].qiov
->niov
+ 1 > IOV_MAX
) {
1813 if (bs
->bl
.max_transfer_length
&& reqs
[outidx
].nb_sectors
+
1814 reqs
[i
].nb_sectors
> bs
->bl
.max_transfer_length
) {
1820 QEMUIOVector
*qiov
= g_malloc0(sizeof(*qiov
));
1821 qemu_iovec_init(qiov
,
1822 reqs
[outidx
].qiov
->niov
+ reqs
[i
].qiov
->niov
+ 1);
1824 // Add the first request to the merged one. If the requests are
1825 // overlapping, drop the last sectors of the first request.
1826 size
= (reqs
[i
].sector
- reqs
[outidx
].sector
) << 9;
1827 qemu_iovec_concat(qiov
, reqs
[outidx
].qiov
, 0, size
);
// We shouldn't need to add any zeros between the two requests
1830 assert (reqs
[i
].sector
<= oldreq_last
);
1832 // Add the second request
1833 qemu_iovec_concat(qiov
, reqs
[i
].qiov
, 0, reqs
[i
].qiov
->size
);
1835 // Add tail of first request, if necessary
1836 if (qiov
->size
< reqs
[outidx
].qiov
->size
) {
1837 qemu_iovec_concat(qiov
, reqs
[outidx
].qiov
, qiov
->size
,
1838 reqs
[outidx
].qiov
->size
- qiov
->size
);
1841 reqs
[outidx
].nb_sectors
= qiov
->size
>> 9;
1842 reqs
[outidx
].qiov
= qiov
;
1844 mcb
->callbacks
[i
].free_qiov
= reqs
[outidx
].qiov
;
1847 reqs
[outidx
].sector
= reqs
[i
].sector
;
1848 reqs
[outidx
].nb_sectors
= reqs
[i
].nb_sectors
;
1849 reqs
[outidx
].qiov
= reqs
[i
].qiov
;
1853 block_acct_merge_done(&bs
->stats
, BLOCK_ACCT_WRITE
, num_reqs
- outidx
- 1);
1859 * Submit multiple AIO write requests at once.
1861 * On success, the function returns 0 and all requests in the reqs array have
1862 * been submitted. In error case this function returns -1, and any of the
1863 * requests may or may not be submitted yet. In particular, this means that the
1864 * callback will be called for some of the requests, for others it won't. The
1865 * caller must check the error field of the BlockRequest to wait for the right
1866 * callbacks (if error != 0, no callback will be called).
1868 * The implementation may modify the contents of the reqs array, e.g. to merge
1869 * requests. However, the fields opaque and error are left unmodified as they
1870 * are used to signal failure for a single request to the caller.
1872 int bdrv_aio_multiwrite(BlockDriverState
*bs
, BlockRequest
*reqs
, int num_reqs
)
1877 /* don't submit writes if we don't have a medium */
1878 if (bs
->drv
== NULL
) {
1879 for (i
= 0; i
< num_reqs
; i
++) {
1880 reqs
[i
].error
= -ENOMEDIUM
;
1885 if (num_reqs
== 0) {
1889 // Create MultiwriteCB structure
1890 mcb
= g_malloc0(sizeof(*mcb
) + num_reqs
* sizeof(*mcb
->callbacks
));
1891 mcb
->num_requests
= 0;
1892 mcb
->num_callbacks
= num_reqs
;
1894 for (i
= 0; i
< num_reqs
; i
++) {
1895 mcb
->callbacks
[i
].cb
= reqs
[i
].cb
;
1896 mcb
->callbacks
[i
].opaque
= reqs
[i
].opaque
;
1899 // Check for mergable requests
1900 num_reqs
= multiwrite_merge(bs
, reqs
, num_reqs
, mcb
);
1902 trace_bdrv_aio_multiwrite(mcb
, mcb
->num_callbacks
, num_reqs
);
1904 /* Run the aio requests. */
1905 mcb
->num_requests
= num_reqs
;
1906 for (i
= 0; i
< num_reqs
; i
++) {
1907 bdrv_co_aio_rw_vector(bs
, reqs
[i
].sector
, reqs
[i
].qiov
,
1908 reqs
[i
].nb_sectors
, reqs
[i
].flags
,
1916 void bdrv_aio_cancel(BlockAIOCB
*acb
)
1919 bdrv_aio_cancel_async(acb
);
1920 while (acb
->refcnt
> 1) {
1921 if (acb
->aiocb_info
->get_aio_context
) {
1922 aio_poll(acb
->aiocb_info
->get_aio_context(acb
), true);
1923 } else if (acb
->bs
) {
1924 aio_poll(bdrv_get_aio_context(acb
->bs
), true);
1929 qemu_aio_unref(acb
);
1932 /* Async version of aio cancel. The caller is not blocked if the acb implements
1933 * cancel_async, otherwise we do nothing and let the request normally complete.
1934 * In either case the completion callback must be called. */
1935 void bdrv_aio_cancel_async(BlockAIOCB
*acb
)
1937 if (acb
->aiocb_info
->cancel_async
) {
1938 acb
->aiocb_info
->cancel_async(acb
);
1942 /**************************************************************/
1943 /* async block device emulation */
1945 typedef struct BlockAIOCBSync
{
1949 /* vector translation state */
1955 static const AIOCBInfo bdrv_em_aiocb_info
= {
1956 .aiocb_size
= sizeof(BlockAIOCBSync
),
1959 static void bdrv_aio_bh_cb(void *opaque
)
1961 BlockAIOCBSync
*acb
= opaque
;
1963 if (!acb
->is_write
&& acb
->ret
>= 0) {
1964 qemu_iovec_from_buf(acb
->qiov
, 0, acb
->bounce
, acb
->qiov
->size
);
1966 qemu_vfree(acb
->bounce
);
1967 acb
->common
.cb(acb
->common
.opaque
, acb
->ret
);
1968 qemu_bh_delete(acb
->bh
);
1970 qemu_aio_unref(acb
);
1973 static BlockAIOCB
*bdrv_aio_rw_vector(BlockDriverState
*bs
,
1977 BlockCompletionFunc
*cb
,
1982 BlockAIOCBSync
*acb
;
1984 acb
= qemu_aio_get(&bdrv_em_aiocb_info
, bs
, cb
, opaque
);
1985 acb
->is_write
= is_write
;
1987 acb
->bounce
= qemu_try_blockalign(bs
, qiov
->size
);
1988 acb
->bh
= aio_bh_new(bdrv_get_aio_context(bs
), bdrv_aio_bh_cb
, acb
);
1990 if (acb
->bounce
== NULL
) {
1992 } else if (is_write
) {
1993 qemu_iovec_to_buf(acb
->qiov
, 0, acb
->bounce
, qiov
->size
);
1994 acb
->ret
= bs
->drv
->bdrv_write(bs
, sector_num
, acb
->bounce
, nb_sectors
);
1996 acb
->ret
= bs
->drv
->bdrv_read(bs
, sector_num
, acb
->bounce
, nb_sectors
);
1999 qemu_bh_schedule(acb
->bh
);
2001 return &acb
->common
;
2004 static BlockAIOCB
*bdrv_aio_readv_em(BlockDriverState
*bs
,
2005 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
2006 BlockCompletionFunc
*cb
, void *opaque
)
2008 return bdrv_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, cb
, opaque
, 0);
2011 static BlockAIOCB
*bdrv_aio_writev_em(BlockDriverState
*bs
,
2012 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
2013 BlockCompletionFunc
*cb
, void *opaque
)
2015 return bdrv_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, cb
, opaque
, 1);
2019 typedef struct BlockAIOCBCoroutine
{
2026 } BlockAIOCBCoroutine
;
2028 static const AIOCBInfo bdrv_em_co_aiocb_info
= {
2029 .aiocb_size
= sizeof(BlockAIOCBCoroutine
),
2032 static void bdrv_co_complete(BlockAIOCBCoroutine
*acb
)
2034 if (!acb
->need_bh
) {
2035 acb
->common
.cb(acb
->common
.opaque
, acb
->req
.error
);
2036 qemu_aio_unref(acb
);
2040 static void bdrv_co_em_bh(void *opaque
)
2042 BlockAIOCBCoroutine
*acb
= opaque
;
2044 assert(!acb
->need_bh
);
2045 qemu_bh_delete(acb
->bh
);
2046 bdrv_co_complete(acb
);
2049 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine
*acb
)
2051 acb
->need_bh
= false;
2052 if (acb
->req
.error
!= -EINPROGRESS
) {
2053 BlockDriverState
*bs
= acb
->common
.bs
;
2055 acb
->bh
= aio_bh_new(bdrv_get_aio_context(bs
), bdrv_co_em_bh
, acb
);
2056 qemu_bh_schedule(acb
->bh
);
2060 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2061 static void coroutine_fn
bdrv_co_do_rw(void *opaque
)
2063 BlockAIOCBCoroutine
*acb
= opaque
;
2064 BlockDriverState
*bs
= acb
->common
.bs
;
2066 if (!acb
->is_write
) {
2067 acb
->req
.error
= bdrv_co_do_readv(bs
, acb
->req
.sector
,
2068 acb
->req
.nb_sectors
, acb
->req
.qiov
, acb
->req
.flags
);
2070 acb
->req
.error
= bdrv_co_do_writev(bs
, acb
->req
.sector
,
2071 acb
->req
.nb_sectors
, acb
->req
.qiov
, acb
->req
.flags
);
2074 bdrv_co_complete(acb
);
2077 static BlockAIOCB
*bdrv_co_aio_rw_vector(BlockDriverState
*bs
,
2081 BdrvRequestFlags flags
,
2082 BlockCompletionFunc
*cb
,
2087 BlockAIOCBCoroutine
*acb
;
2089 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
2090 acb
->need_bh
= true;
2091 acb
->req
.error
= -EINPROGRESS
;
2092 acb
->req
.sector
= sector_num
;
2093 acb
->req
.nb_sectors
= nb_sectors
;
2094 acb
->req
.qiov
= qiov
;
2095 acb
->req
.flags
= flags
;
2096 acb
->is_write
= is_write
;
2098 co
= qemu_coroutine_create(bdrv_co_do_rw
);
2099 qemu_coroutine_enter(co
, acb
);
2101 bdrv_co_maybe_schedule_bh(acb
);
2102 return &acb
->common
;
2105 static void coroutine_fn
bdrv_aio_flush_co_entry(void *opaque
)
2107 BlockAIOCBCoroutine
*acb
= opaque
;
2108 BlockDriverState
*bs
= acb
->common
.bs
;
2110 acb
->req
.error
= bdrv_co_flush(bs
);
2111 bdrv_co_complete(acb
);
2114 BlockAIOCB
*bdrv_aio_flush(BlockDriverState
*bs
,
2115 BlockCompletionFunc
*cb
, void *opaque
)
2117 trace_bdrv_aio_flush(bs
, opaque
);
2120 BlockAIOCBCoroutine
*acb
;
2122 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
2123 acb
->need_bh
= true;
2124 acb
->req
.error
= -EINPROGRESS
;
2126 co
= qemu_coroutine_create(bdrv_aio_flush_co_entry
);
2127 qemu_coroutine_enter(co
, acb
);
2129 bdrv_co_maybe_schedule_bh(acb
);
2130 return &acb
->common
;
2133 static void coroutine_fn
bdrv_aio_discard_co_entry(void *opaque
)
2135 BlockAIOCBCoroutine
*acb
= opaque
;
2136 BlockDriverState
*bs
= acb
->common
.bs
;
2138 acb
->req
.error
= bdrv_co_discard(bs
, acb
->req
.sector
, acb
->req
.nb_sectors
);
2139 bdrv_co_complete(acb
);
2142 BlockAIOCB
*bdrv_aio_discard(BlockDriverState
*bs
,
2143 int64_t sector_num
, int nb_sectors
,
2144 BlockCompletionFunc
*cb
, void *opaque
)
2147 BlockAIOCBCoroutine
*acb
;
2149 trace_bdrv_aio_discard(bs
, sector_num
, nb_sectors
, opaque
);
2151 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
2152 acb
->need_bh
= true;
2153 acb
->req
.error
= -EINPROGRESS
;
2154 acb
->req
.sector
= sector_num
;
2155 acb
->req
.nb_sectors
= nb_sectors
;
2156 co
= qemu_coroutine_create(bdrv_aio_discard_co_entry
);
2157 qemu_coroutine_enter(co
, acb
);
2159 bdrv_co_maybe_schedule_bh(acb
);
2160 return &acb
->common
;
2163 void *qemu_aio_get(const AIOCBInfo
*aiocb_info
, BlockDriverState
*bs
,
2164 BlockCompletionFunc
*cb
, void *opaque
)
2168 acb
= g_slice_alloc(aiocb_info
->aiocb_size
);
2169 acb
->aiocb_info
= aiocb_info
;
2172 acb
->opaque
= opaque
;
2177 void qemu_aio_ref(void *p
)
2179 BlockAIOCB
*acb
= p
;
2183 void qemu_aio_unref(void *p
)
2185 BlockAIOCB
*acb
= p
;
2186 assert(acb
->refcnt
> 0);
2187 if (--acb
->refcnt
== 0) {
2188 g_slice_free1(acb
->aiocb_info
->aiocb_size
, acb
);
2192 /**************************************************************/
2193 /* Coroutine block device emulation */
2195 typedef struct CoroutineIOCompletion
{
2196 Coroutine
*coroutine
;
2198 } CoroutineIOCompletion
;
2200 static void bdrv_co_io_em_complete(void *opaque
, int ret
)
2202 CoroutineIOCompletion
*co
= opaque
;
2205 qemu_coroutine_enter(co
->coroutine
, NULL
);
2208 static int coroutine_fn
bdrv_co_io_em(BlockDriverState
*bs
, int64_t sector_num
,
2209 int nb_sectors
, QEMUIOVector
*iov
,
2212 CoroutineIOCompletion co
= {
2213 .coroutine
= qemu_coroutine_self(),
2218 acb
= bs
->drv
->bdrv_aio_writev(bs
, sector_num
, iov
, nb_sectors
,
2219 bdrv_co_io_em_complete
, &co
);
2221 acb
= bs
->drv
->bdrv_aio_readv(bs
, sector_num
, iov
, nb_sectors
,
2222 bdrv_co_io_em_complete
, &co
);
2225 trace_bdrv_co_io_em(bs
, sector_num
, nb_sectors
, is_write
, acb
);
2229 qemu_coroutine_yield();
2234 static int coroutine_fn
bdrv_co_readv_em(BlockDriverState
*bs
,
2235 int64_t sector_num
, int nb_sectors
,
2238 return bdrv_co_io_em(bs
, sector_num
, nb_sectors
, iov
, false);
2241 static int coroutine_fn
bdrv_co_writev_em(BlockDriverState
*bs
,
2242 int64_t sector_num
, int nb_sectors
,
2245 return bdrv_co_io_em(bs
, sector_num
, nb_sectors
, iov
, true);
2248 static void coroutine_fn
bdrv_flush_co_entry(void *opaque
)
2250 RwCo
*rwco
= opaque
;
2252 rwco
->ret
= bdrv_co_flush(rwco
->bs
);
2255 int coroutine_fn
bdrv_co_flush(BlockDriverState
*bs
)
2259 if (!bs
|| !bdrv_is_inserted(bs
) || bdrv_is_read_only(bs
)) {
2263 /* Write back cached data to the OS even with cache=unsafe */
2264 BLKDBG_EVENT(bs
->file
, BLKDBG_FLUSH_TO_OS
);
2265 if (bs
->drv
->bdrv_co_flush_to_os
) {
2266 ret
= bs
->drv
->bdrv_co_flush_to_os(bs
);
2272 /* But don't actually force it to the disk with cache=unsafe */
2273 if (bs
->open_flags
& BDRV_O_NO_FLUSH
) {
2277 BLKDBG_EVENT(bs
->file
, BLKDBG_FLUSH_TO_DISK
);
2278 if (bs
->drv
->bdrv_co_flush_to_disk
) {
2279 ret
= bs
->drv
->bdrv_co_flush_to_disk(bs
);
2280 } else if (bs
->drv
->bdrv_aio_flush
) {
2282 CoroutineIOCompletion co
= {
2283 .coroutine
= qemu_coroutine_self(),
2286 acb
= bs
->drv
->bdrv_aio_flush(bs
, bdrv_co_io_em_complete
, &co
);
2290 qemu_coroutine_yield();
2295 * Some block drivers always operate in either writethrough or unsafe
2296 * mode and don't support bdrv_flush therefore. Usually qemu doesn't
2297 * know how the server works (because the behaviour is hardcoded or
2298 * depends on server-side configuration), so we can't ensure that
2299 * everything is safe on disk. Returning an error doesn't work because
2300 * that would break guests even if the server operates in writethrough
2303 * Let's hope the user knows what he's doing.
2311 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2312 * in the case of cache=unsafe, so there are no useless flushes.
2315 return bdrv_co_flush(bs
->file
);
2318 int bdrv_flush(BlockDriverState
*bs
)
2326 if (qemu_in_coroutine()) {
2327 /* Fast-path if already in coroutine context */
2328 bdrv_flush_co_entry(&rwco
);
2330 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
2332 co
= qemu_coroutine_create(bdrv_flush_co_entry
);
2333 qemu_coroutine_enter(co
, &rwco
);
2334 while (rwco
.ret
== NOT_DONE
) {
2335 aio_poll(aio_context
, true);
2342 typedef struct DiscardCo
{
2343 BlockDriverState
*bs
;
2348 static void coroutine_fn
bdrv_discard_co_entry(void *opaque
)
2350 DiscardCo
*rwco
= opaque
;
2352 rwco
->ret
= bdrv_co_discard(rwco
->bs
, rwco
->sector_num
, rwco
->nb_sectors
);
2355 int coroutine_fn
bdrv_co_discard(BlockDriverState
*bs
, int64_t sector_num
,
2358 int max_discard
, ret
;
2364 ret
= bdrv_check_request(bs
, sector_num
, nb_sectors
);
2367 } else if (bs
->read_only
) {
2371 bdrv_reset_dirty(bs
, sector_num
, nb_sectors
);
2373 /* Do nothing if disabled. */
2374 if (!(bs
->open_flags
& BDRV_O_UNMAP
)) {
2378 if (!bs
->drv
->bdrv_co_discard
&& !bs
->drv
->bdrv_aio_discard
) {
2382 max_discard
= MIN_NON_ZERO(bs
->bl
.max_discard
, BDRV_REQUEST_MAX_SECTORS
);
2383 while (nb_sectors
> 0) {
2385 int num
= nb_sectors
;
2388 if (bs
->bl
.discard_alignment
&&
2389 num
>= bs
->bl
.discard_alignment
&&
2390 sector_num
% bs
->bl
.discard_alignment
) {
2391 if (num
> bs
->bl
.discard_alignment
) {
2392 num
= bs
->bl
.discard_alignment
;
2394 num
-= sector_num
% bs
->bl
.discard_alignment
;
2397 /* limit request size */
2398 if (num
> max_discard
) {
2402 if (bs
->drv
->bdrv_co_discard
) {
2403 ret
= bs
->drv
->bdrv_co_discard(bs
, sector_num
, num
);
2406 CoroutineIOCompletion co
= {
2407 .coroutine
= qemu_coroutine_self(),
2410 acb
= bs
->drv
->bdrv_aio_discard(bs
, sector_num
, nb_sectors
,
2411 bdrv_co_io_em_complete
, &co
);
2415 qemu_coroutine_yield();
2419 if (ret
&& ret
!= -ENOTSUP
) {
2429 int bdrv_discard(BlockDriverState
*bs
, int64_t sector_num
, int nb_sectors
)
2434 .sector_num
= sector_num
,
2435 .nb_sectors
= nb_sectors
,
2439 if (qemu_in_coroutine()) {
2440 /* Fast-path if already in coroutine context */
2441 bdrv_discard_co_entry(&rwco
);
2443 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
2445 co
= qemu_coroutine_create(bdrv_discard_co_entry
);
2446 qemu_coroutine_enter(co
, &rwco
);
2447 while (rwco
.ret
== NOT_DONE
) {
2448 aio_poll(aio_context
, true);
2455 /* needed for generic scsi interface */
2457 int bdrv_ioctl(BlockDriverState
*bs
, unsigned long int req
, void *buf
)
2459 BlockDriver
*drv
= bs
->drv
;
2461 if (drv
&& drv
->bdrv_ioctl
)
2462 return drv
->bdrv_ioctl(bs
, req
, buf
);
2466 BlockAIOCB
*bdrv_aio_ioctl(BlockDriverState
*bs
,
2467 unsigned long int req
, void *buf
,
2468 BlockCompletionFunc
*cb
, void *opaque
)
2470 BlockDriver
*drv
= bs
->drv
;
2472 if (drv
&& drv
->bdrv_aio_ioctl
)
2473 return drv
->bdrv_aio_ioctl(bs
, req
, buf
, cb
, opaque
);
2477 void *qemu_blockalign(BlockDriverState
*bs
, size_t size
)
2479 return qemu_memalign(bdrv_opt_mem_align(bs
), size
);
2482 void *qemu_blockalign0(BlockDriverState
*bs
, size_t size
)
2484 return memset(qemu_blockalign(bs
, size
), 0, size
);
2487 void *qemu_try_blockalign(BlockDriverState
*bs
, size_t size
)
2489 size_t align
= bdrv_opt_mem_align(bs
);
2491 /* Ensure that NULL is never returned on success */
2497 return qemu_try_memalign(align
, size
);
2500 void *qemu_try_blockalign0(BlockDriverState
*bs
, size_t size
)
2502 void *mem
= qemu_try_blockalign(bs
, size
);
2505 memset(mem
, 0, size
);
2512 * Check if all memory in this vector is sector aligned.
2514 bool bdrv_qiov_is_aligned(BlockDriverState
*bs
, QEMUIOVector
*qiov
)
2517 size_t alignment
= bdrv_min_mem_align(bs
);
2519 for (i
= 0; i
< qiov
->niov
; i
++) {
2520 if ((uintptr_t) qiov
->iov
[i
].iov_base
% alignment
) {
2523 if (qiov
->iov
[i
].iov_len
% alignment
) {
2531 void bdrv_add_before_write_notifier(BlockDriverState
*bs
,
2532 NotifierWithReturn
*notifier
)
2534 notifier_with_return_list_add(&bs
->before_write_notifiers
, notifier
);
2537 void bdrv_io_plug(BlockDriverState
*bs
)
2539 BlockDriver
*drv
= bs
->drv
;
2540 if (drv
&& drv
->bdrv_io_plug
) {
2541 drv
->bdrv_io_plug(bs
);
2542 } else if (bs
->file
) {
2543 bdrv_io_plug(bs
->file
);
2547 void bdrv_io_unplug(BlockDriverState
*bs
)
2549 BlockDriver
*drv
= bs
->drv
;
2550 if (drv
&& drv
->bdrv_io_unplug
) {
2551 drv
->bdrv_io_unplug(bs
);
2552 } else if (bs
->file
) {
2553 bdrv_io_unplug(bs
->file
);
2557 void bdrv_flush_io_queue(BlockDriverState
*bs
)
2559 BlockDriver
*drv
= bs
->drv
;
2560 if (drv
&& drv
->bdrv_flush_io_queue
) {
2561 drv
->bdrv_flush_io_queue(bs
);
2562 } else if (bs
->file
) {
2563 bdrv_flush_io_queue(bs
->file
);