/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs, ThrottleConfig *cfg)
{
    int i;

    throttle_group_config(bs, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/O requests */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;
    bdrv_start_throttled_reqs(bs);
    throttle_group_unregister_bs(bs);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
    assert(!bs->io_limits_enabled);
    throttle_group_register_bs(bs, group);
    bs->io_limits_enabled = true;
}

void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
{
    /* this bs is not part of any group */
    if (!bs->throttle_state) {
        return;
    }

    /* this bs is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(bs), group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    bdrv_io_limits_disable(bs);
    bdrv_io_limits_enable(bs, group);
}
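
/* Usage sketch (illustrative only, not part of the original file): a caller
 * that wants to throttle a BDS typically registers it with a group first and
 * then applies the limits:
 *
 *     bdrv_io_limits_enable(bs, "my-group");     // "my-group" is hypothetical
 *     bdrv_set_io_limits(bs, &cfg);              // cfg is a ThrottleConfig
 *
 * bdrv_io_limits_update_group() can later move the BDS to another group, and
 * bdrv_io_limits_disable() tears the throttling down again.
 */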
void bdrv_setup_io_funcs(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file->bs)) {
        return true;
    }
    if (bs->backing && bdrv_requests_pending(bs->backing->bs)) {
        return true;
    }
    return false;
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void bdrv_drain(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        bdrv_flush_io_queue(bs);
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}
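
/* Usage sketch (illustrative only, not part of the original code): a caller
 * that owns the BDS's AioContext typically brackets the drain like this:
 *
 *     AioContext *ctx = bdrv_get_aio_context(bs);
 *     aio_context_acquire(ctx);
 *     bdrv_drain(bs);                  // wait for in-flight requests
 *     aio_context_release(ctx);
 */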
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;
            bs = NULL;

            aio_context_acquire(aio_context);
            while ((bs = bdrv_next(bs))) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    bdrv_flush_io_queue(bs);
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .is_write       = is_write,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
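
/* Worked example (illustrative, not from the original source): with a 64 KiB
 * cluster size, c = 65536 / 512 = 128 sectors.  Rounding sector_num = 130,
 * nb_sectors = 10 gives *cluster_sector_num = 128 and
 * *cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128.
 */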
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
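
/* Usage sketch (illustrative only, not part of the original file): a metadata
 * update that must reach the disk before later writes could look like this:
 *
 *     uint8_t header[512];
 *     if (bdrv_pread(bs, 0, header, sizeof(header)) < 0) {
 *         ... handle error ...
 *     }
 *     header[0] = 1;                               // hypothetical field
 *     ret = bdrv_pwrite_sync(bs, 0, header, sizeof(header));
 */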
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_COPY_ON_READ)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        throttle_group_co_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_no_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_no_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_NO_COPY_ON_READ);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    qemu_vfree(iov.iov_base);
    return ret;
}
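
/* Illustrative example (not from the original source): with
 * write_zeroes_alignment = 8 sectors, a request for sectors [5, 27) is split
 * by the loop above into [5, 8) (head up to the first aligned sector),
 * [8, 24) (aligned bulk), and [24, 27) (unaligned tail).
 */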
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        throttle_group_co_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
int bdrv_flush_all(void)
{
    BlockDriverState *bs = NULL;
    int result = 0;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int file_pnum;

        ret2 = bdrv_co_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors, int *pnum)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
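
/* Usage sketch (illustrative only, not part of the original file): scanning an
 * image's allocation map works much like bdrv_make_zero() above:
 *
 *     int n;
 *     int64_t sector = 0;
 *     while (sector < bdrv_nb_sectors(bs)) {
 *         int ret = bdrv_is_allocated(bs, sector, 1024, &n);
 *         if (ret < 0) {
 *             break;                 // error
 *         }
 *         // ret != 0: [sector, sector + n) is allocated in this layer
 *         sector += n;
 *     }
 */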
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
    return -ENOTSUP;
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
                            int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We should not need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                                  reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    if (bs->blk) {
        block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
                              num_reqs - outidx - 1);
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb, true);
    }

    return 0;
}
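
/* Usage sketch (illustrative only; the field names follow BlockRequest as
 * used above, while my_write_cb and dev are hypothetical): a device model
 * batching two writes might fill the array like this:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_cb, .opaque = dev },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = dev },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         ... check reqs[i].error for requests that were never submitted ...
 *     }
 */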
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockAIOCBSync {
    BlockAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockAIOCBSync;

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBSync),
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockAIOCBSync *acb = opaque;

    if (!acb->is_write && acb->ret >= 0) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_unref(acb);
}

static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov,
                                      int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque,
                                      int is_write)
{
    BlockAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_try_blockalign(bs, qiov->size);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (acb->bounce == NULL) {
        acb->ret = -ENOMEM;
    } else if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
    QEMUBH *bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bs->file ? bdrv_co_flush(bs->file->bs) : 0;
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
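
/* Usage sketch (illustrative only, not part of the original file): callers
 * that need an aligned vector can build one from qemu_blockalign()'d memory:
 *
 *     QEMUIOVector qiov;
 *     void *buf = qemu_blockalign(bs, 4096);
 *     struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     // bdrv_qiov_is_aligned(bs, &qiov) will then normally return true,
 *     // since the buffer meets the device's memory alignment.
 */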
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_plug) {
        drv->bdrv_io_plug(bs);
    } else if (bs->file) {
        bdrv_io_plug(bs->file->bs);
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_unplug) {
        drv->bdrv_io_unplug(bs);
    } else if (bs->file) {
        bdrv_io_unplug(bs->file->bs);
    }
}

void bdrv_flush_io_queue(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_flush_io_queue) {
        drv->bdrv_flush_io_queue(bs);
    } else if (bs->file) {
        bdrv_flush_io_queue(bs->file->bs);
    }
    bdrv_start_throttled_reqs(bs);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }
    bdrv_drain(bs); /* ensure there are no in-flight requests */
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }
    aio_enable_external(bdrv_get_aio_context(bs));
}