/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu/qtest.h"
#include "block/blockjob.h"
#include "block/block_int.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
        int64_t sector_num,
        QEMUIOVector *qiov,
        int nb_sectors,
        BdrvRequestFlags flags,
        BlockCompletionFunc *cb,
        void *opaque,
        bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
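
/* The helpers below implement disk I/O throttling on top of the throttle
 * state embedded in BlockDriverState: throttled requests are parked on
 * bs->throttled_reqs and kicked again by the timer callbacks further down. */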
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
    bool enabled = bs->io_limits_enabled;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {

    bs->io_limits_enabled = enabled;

void bdrv_io_limits_disable(BlockDriverState *bs)
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);

static void bdrv_throttle_read_timer_cb(void *opaque)
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);

static void bdrv_throttle_write_timer_cb(void *opaque)
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;

    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  clock_type,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;

/* This function makes an IO wait if needed
 *
 * @nb_sectors: the number of sectors of the IO
 * @is_write:   is the IO a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
    /* must this I/O wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);

    /* the IO will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
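
/* Plug the generic emulation helpers defined later in this file into a
 * BlockDriver that implements only one of the coroutine/AIO interfaces. */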
void bdrv_setup_io_funcs(BlockDriver *bdrv)
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
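
/* bdrv_refresh_limits() recomputes bs->bl from scratch: it starts from the
 * limits of bs->file, merges in the backing file's limits and finally lets
 * the driver's own bdrv_refresh_limits callback override the result. */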
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    /* Take some limits from the children as a default */
    bdrv_refresh_limits(bs->file, &local_err);
        error_propagate(errp, local_err);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
        bs->bl.opt_mem_alignment = 512;

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
            error_propagate(errp, local_err);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
/*
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)

void bdrv_disable_copy_on_read(BlockDriverState *bs)
    assert(bs->copy_on_read > 0);

/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
    if (bs->file && bdrv_requests_pending(bs->file)) {
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {

static bool bdrv_drain_one(BlockDriverState *bs)
    bdrv_flush_io_queue(bs);
    bdrv_start_throttled_reqs(bs);
    bs_busy = bdrv_requests_pending(bs);
    bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree
 *
 * See the warning in bdrv_drain_all(). This function can only be called if
 * you are sure nothing can generate I/O because you have op blockers
 * installed.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void bdrv_drain(BlockDriverState *bs)
    while (bdrv_drain_one(bs)) {

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
    /* Always run first iteration so any pending completion BHs run */
    BlockDriverState *bs = NULL;

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        block_job_pause(bs->job);
        aio_context_release(aio_context);

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        busy |= bdrv_drain_one(bs);
        aio_context_release(aio_context);

    while ((bs = bdrv_next(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        block_job_resume(bs->job);
        aio_context_release(aio_context);
/*
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
    if (req->serialising) {
        req->bs->serialising_in_flight--;

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);

/*
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
    *req = (BdrvTrackedRequest){
        .is_write       = is_write,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
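
/* Widen the request's overlap window to @align boundaries and mark it as
 * serialising, so that overlapping requests wait for it (and vice versa). */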
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                                 - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
/*
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);

static int bdrv_get_cluster_size(BlockDriverState *bs)
    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
        return bdi.cluster_size;
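
/* Two tracked requests overlap unless one of them ends at or before the
 * point where the other one starts. */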
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
    if (offset >= req->overlap_offset + req->overlap_bytes) {
    if (req->overlap_offset >= offset + bytes) {

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;

    if (!bs->serialising_in_flight) {

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
            /* Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /* If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case). */
            if (!req->waiting_for) {
                self->waiting_for = req;
                qemu_co_queue_wait(&req->wait_queue);
                self->waiting_for = NULL;
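
/* Basic sanity checks run before a request is issued: reject oversized
 * requests and requests against a device with no medium inserted. */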
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {

    if (!bdrv_is_inserted(bs)) {

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
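
/* RwCo carries the parameters of a synchronous request into the coroutine
 * entry point below; ret starts out as NOT_DONE so the synchronous caller
 * can poll the AioContext until the coroutine has finished. */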
525 typedef struct RwCo
{
526 BlockDriverState
*bs
;
531 BdrvRequestFlags flags
;
534 static void coroutine_fn
bdrv_rw_co_entry(void *opaque
)
538 if (!rwco
->is_write
) {
539 rwco
->ret
= bdrv_co_do_preadv(rwco
->bs
, rwco
->offset
,
540 rwco
->qiov
->size
, rwco
->qiov
,
543 rwco
->ret
= bdrv_co_do_pwritev(rwco
->bs
, rwco
->offset
,
544 rwco
->qiov
->size
, rwco
->qiov
,
550 * Process a vectored synchronous request using coroutines
552 static int bdrv_prwv_co(BlockDriverState
*bs
, int64_t offset
,
553 QEMUIOVector
*qiov
, bool is_write
,
554 BdrvRequestFlags flags
)
561 .is_write
= is_write
,
567 * In sync call context, when the vcpu is blocked, this throttling timer
568 * will not fire; so the I/O throttling function has to be disabled here
569 * if it has been enabled.
571 if (bs
->io_limits_enabled
) {
572 fprintf(stderr
, "Disabling I/O throttling on '%s' due "
573 "to synchronous I/O.\n", bdrv_get_device_name(bs
));
574 bdrv_io_limits_disable(bs
);
577 if (qemu_in_coroutine()) {
578 /* Fast-path if already in coroutine context */
579 bdrv_rw_co_entry(&rwco
);
581 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
583 co
= qemu_coroutine_create(bdrv_rw_co_entry
);
584 qemu_coroutine_enter(co
, &rwco
);
585 while (rwco
.ret
== NOT_DONE
) {
586 aio_poll(aio_context
, true);
593 * Process a synchronous request using coroutines
595 static int bdrv_rw_co(BlockDriverState
*bs
, int64_t sector_num
, uint8_t *buf
,
596 int nb_sectors
, bool is_write
, BdrvRequestFlags flags
)
600 .iov_base
= (void *)buf
,
601 .iov_len
= nb_sectors
* BDRV_SECTOR_SIZE
,
604 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
608 qemu_iovec_init_external(&qiov
, &iov
, 1);
609 return bdrv_prwv_co(bs
, sector_num
<< BDRV_SECTOR_BITS
,
610 &qiov
, is_write
, flags
);
613 /* return < 0 if error. See bdrv_write() for the return codes */
614 int bdrv_read(BlockDriverState
*bs
, int64_t sector_num
,
615 uint8_t *buf
, int nb_sectors
)
617 return bdrv_rw_co(bs
, sector_num
, buf
, nb_sectors
, false, 0);
620 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
621 int bdrv_read_unthrottled(BlockDriverState
*bs
, int64_t sector_num
,
622 uint8_t *buf
, int nb_sectors
)
627 enabled
= bs
->io_limits_enabled
;
628 bs
->io_limits_enabled
= false;
629 ret
= bdrv_read(bs
, sector_num
, buf
, nb_sectors
);
630 bs
->io_limits_enabled
= enabled
;
634 /* Return < 0 if error. Important errors are:
635 -EIO generic I/O error (may happen for all errors)
636 -ENOMEDIUM No media inserted.
637 -EINVAL Invalid sector number or nb_sectors
638 -EACCES Trying to write a read-only device
640 int bdrv_write(BlockDriverState
*bs
, int64_t sector_num
,
641 const uint8_t *buf
, int nb_sectors
)
643 return bdrv_rw_co(bs
, sector_num
, (uint8_t *)buf
, nb_sectors
, true, 0);
646 int bdrv_write_zeroes(BlockDriverState
*bs
, int64_t sector_num
,
647 int nb_sectors
, BdrvRequestFlags flags
)
649 return bdrv_rw_co(bs
, sector_num
, NULL
, nb_sectors
, true,
650 BDRV_REQ_ZERO_WRITE
| flags
);
654 * Completely zero out a block device with the help of bdrv_write_zeroes.
655 * The operation is sped up by checking the block status and only writing
656 * zeroes to the device if they currently do not return zeroes. Optional
657 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
659 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
661 int bdrv_make_zero(BlockDriverState
*bs
, BdrvRequestFlags flags
)
663 int64_t target_sectors
, ret
, nb_sectors
, sector_num
= 0;
666 target_sectors
= bdrv_nb_sectors(bs
);
667 if (target_sectors
< 0) {
668 return target_sectors
;
672 nb_sectors
= MIN(target_sectors
- sector_num
, BDRV_REQUEST_MAX_SECTORS
);
673 if (nb_sectors
<= 0) {
676 ret
= bdrv_get_block_status(bs
, sector_num
, nb_sectors
, &n
);
678 error_report("error getting block status at sector %" PRId64
": %s",
679 sector_num
, strerror(-ret
));
682 if (ret
& BDRV_BLOCK_ZERO
) {
686 ret
= bdrv_write_zeroes(bs
, sector_num
, n
, flags
);
688 error_report("error writing zeroes at sector %" PRId64
": %s",
689 sector_num
, strerror(-ret
));
696 int bdrv_pread(BlockDriverState
*bs
, int64_t offset
, void *buf
, int bytes
)
700 .iov_base
= (void *)buf
,
709 qemu_iovec_init_external(&qiov
, &iov
, 1);
710 ret
= bdrv_prwv_co(bs
, offset
, &qiov
, false, 0);
718 int bdrv_pwritev(BlockDriverState
*bs
, int64_t offset
, QEMUIOVector
*qiov
)
722 ret
= bdrv_prwv_co(bs
, offset
, qiov
, true, 0);
730 int bdrv_pwrite(BlockDriverState
*bs
, int64_t offset
,
731 const void *buf
, int bytes
)
735 .iov_base
= (void *) buf
,
743 qemu_iovec_init_external(&qiov
, &iov
, 1);
744 return bdrv_pwritev(bs
, offset
, &qiov
);
748 * Writes to the file and ensures that no writes are reordered across this
749 * request (acts as a barrier)
751 * Returns 0 on success, -errno in error cases.
753 int bdrv_pwrite_sync(BlockDriverState
*bs
, int64_t offset
,
754 const void *buf
, int count
)
758 ret
= bdrv_pwrite(bs
, offset
, buf
, count
);
763 /* No flush needed for cache modes that already do it */
764 if (bs
->enable_write_cache
) {
771 static int coroutine_fn
bdrv_co_do_copy_on_readv(BlockDriverState
*bs
,
772 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
774 /* Perform I/O through a temporary buffer so that users who scribble over
775 * their read buffer while the operation is in progress do not end up
776 * modifying the image file. This is critical for zero-copy guest I/O
777 * where anything might happen inside guest memory.
781 BlockDriver
*drv
= bs
->drv
;
783 QEMUIOVector bounce_qiov
;
784 int64_t cluster_sector_num
;
785 int cluster_nb_sectors
;
789 /* Cover entire cluster so no additional backing file I/O is required when
790 * allocating cluster in the image file.
792 bdrv_round_to_clusters(bs
, sector_num
, nb_sectors
,
793 &cluster_sector_num
, &cluster_nb_sectors
);
795 trace_bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
,
796 cluster_sector_num
, cluster_nb_sectors
);
798 iov
.iov_len
= cluster_nb_sectors
* BDRV_SECTOR_SIZE
;
799 iov
.iov_base
= bounce_buffer
= qemu_try_blockalign(bs
, iov
.iov_len
);
800 if (bounce_buffer
== NULL
) {
805 qemu_iovec_init_external(&bounce_qiov
, &iov
, 1);
807 ret
= drv
->bdrv_co_readv(bs
, cluster_sector_num
, cluster_nb_sectors
,
813 if (drv
->bdrv_co_write_zeroes
&&
814 buffer_is_zero(bounce_buffer
, iov
.iov_len
)) {
815 ret
= bdrv_co_do_write_zeroes(bs
, cluster_sector_num
,
816 cluster_nb_sectors
, 0);
818 /* This does not change the data on the disk, it is not necessary
819 * to flush even in cache=writethrough mode.
821 ret
= drv
->bdrv_co_writev(bs
, cluster_sector_num
, cluster_nb_sectors
,
826 /* It might be okay to ignore write errors for guest requests. If this
827 * is a deliberate copy-on-read then we don't want to ignore the error.
828 * Simply report it in all cases.
833 skip_bytes
= (sector_num
- cluster_sector_num
) * BDRV_SECTOR_SIZE
;
834 qemu_iovec_from_buf(qiov
, 0, bounce_buffer
+ skip_bytes
,
835 nb_sectors
* BDRV_SECTOR_SIZE
);
838 qemu_vfree(bounce_buffer
);
843 * Forwards an already correctly aligned request to the BlockDriver. This
844 * handles copy on read and zeroing after EOF; any other features must be
845 * implemented by the caller.
847 static int coroutine_fn
bdrv_aligned_preadv(BlockDriverState
*bs
,
848 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
849 int64_t align
, QEMUIOVector
*qiov
, int flags
)
851 BlockDriver
*drv
= bs
->drv
;
854 int64_t sector_num
= offset
>> BDRV_SECTOR_BITS
;
855 unsigned int nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
857 assert((offset
& (BDRV_SECTOR_SIZE
- 1)) == 0);
858 assert((bytes
& (BDRV_SECTOR_SIZE
- 1)) == 0);
859 assert(!qiov
|| bytes
== qiov
->size
);
861 /* Handle Copy on Read and associated serialisation */
862 if (flags
& BDRV_REQ_COPY_ON_READ
) {
863 /* If we touch the same cluster it counts as an overlap. This
864 * guarantees that allocating writes will be serialized and not race
865 * with each other for the same cluster. For example, in copy-on-read
866 * it ensures that the CoR read and write operations are atomic and
867 * guest writes cannot interleave between them. */
868 mark_request_serialising(req
, bdrv_get_cluster_size(bs
));
871 wait_serialising_requests(req
);
873 if (flags
& BDRV_REQ_COPY_ON_READ
) {
876 ret
= bdrv_is_allocated(bs
, sector_num
, nb_sectors
, &pnum
);
881 if (!ret
|| pnum
!= nb_sectors
) {
882 ret
= bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
, qiov
);
887 /* Forward the request to the BlockDriver */
888 if (!bs
->zero_beyond_eof
) {
889 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
891 /* Read zeros after EOF */
892 int64_t total_sectors
, max_nb_sectors
;
894 total_sectors
= bdrv_nb_sectors(bs
);
895 if (total_sectors
< 0) {
900 max_nb_sectors
= ROUND_UP(MAX(0, total_sectors
- sector_num
),
901 align
>> BDRV_SECTOR_BITS
);
902 if (nb_sectors
< max_nb_sectors
) {
903 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
904 } else if (max_nb_sectors
> 0) {
905 QEMUIOVector local_qiov
;
907 qemu_iovec_init(&local_qiov
, qiov
->niov
);
908 qemu_iovec_concat(&local_qiov
, qiov
, 0,
909 max_nb_sectors
* BDRV_SECTOR_SIZE
);
911 ret
= drv
->bdrv_co_readv(bs
, sector_num
, max_nb_sectors
,
914 qemu_iovec_destroy(&local_qiov
);
919 /* Reading beyond end of file is supposed to produce zeroes */
920 if (ret
== 0 && total_sectors
< sector_num
+ nb_sectors
) {
921 uint64_t offset
= MAX(0, total_sectors
- sector_num
);
922 uint64_t bytes
= (sector_num
+ nb_sectors
- offset
) *
924 qemu_iovec_memset(qiov
, offset
* BDRV_SECTOR_SIZE
, 0, bytes
);
932 static inline uint64_t bdrv_get_align(BlockDriverState
*bs
)
934 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
935 return MAX(BDRV_SECTOR_SIZE
, bs
->request_alignment
);
938 static inline bool bdrv_req_is_aligned(BlockDriverState
*bs
,
939 int64_t offset
, size_t bytes
)
941 int64_t align
= bdrv_get_align(bs
);
942 return !(offset
& (align
- 1) || (bytes
& (align
- 1)));
946 * Handle a read request in coroutine context
948 static int coroutine_fn
bdrv_co_do_preadv(BlockDriverState
*bs
,
949 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
950 BdrvRequestFlags flags
)
952 BlockDriver
*drv
= bs
->drv
;
953 BdrvTrackedRequest req
;
955 uint64_t align
= bdrv_get_align(bs
);
956 uint8_t *head_buf
= NULL
;
957 uint8_t *tail_buf
= NULL
;
958 QEMUIOVector local_qiov
;
959 bool use_local_qiov
= false;
966 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
971 if (bs
->copy_on_read
) {
972 flags
|= BDRV_REQ_COPY_ON_READ
;
975 /* throttling disk I/O */
976 if (bs
->io_limits_enabled
) {
977 bdrv_io_limits_intercept(bs
, bytes
, false);
980 /* Align read if necessary by padding qiov */
981 if (offset
& (align
- 1)) {
982 head_buf
= qemu_blockalign(bs
, align
);
983 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 2);
984 qemu_iovec_add(&local_qiov
, head_buf
, offset
& (align
- 1));
985 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
986 use_local_qiov
= true;
988 bytes
+= offset
& (align
- 1);
989 offset
= offset
& ~(align
- 1);
992 if ((offset
+ bytes
) & (align
- 1)) {
993 if (!use_local_qiov
) {
994 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 1);
995 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
996 use_local_qiov
= true;
998 tail_buf
= qemu_blockalign(bs
, align
);
999 qemu_iovec_add(&local_qiov
, tail_buf
,
1000 align
- ((offset
+ bytes
) & (align
- 1)));
1002 bytes
= ROUND_UP(bytes
, align
);
1005 tracked_request_begin(&req
, bs
, offset
, bytes
, false);
1006 ret
= bdrv_aligned_preadv(bs
, &req
, offset
, bytes
, align
,
1007 use_local_qiov
? &local_qiov
: qiov
,
1009 tracked_request_end(&req
);
1011 if (use_local_qiov
) {
1012 qemu_iovec_destroy(&local_qiov
);
1013 qemu_vfree(head_buf
);
1014 qemu_vfree(tail_buf
);
1020 static int coroutine_fn
bdrv_co_do_readv(BlockDriverState
*bs
,
1021 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1022 BdrvRequestFlags flags
)
1024 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
1028 return bdrv_co_do_preadv(bs
, sector_num
<< BDRV_SECTOR_BITS
,
1029 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
1032 int coroutine_fn
bdrv_co_readv(BlockDriverState
*bs
, int64_t sector_num
,
1033 int nb_sectors
, QEMUIOVector
*qiov
)
1035 trace_bdrv_co_readv(bs
, sector_num
, nb_sectors
);
1037 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
, 0);
1040 int coroutine_fn
bdrv_co_copy_on_readv(BlockDriverState
*bs
,
1041 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
1043 trace_bdrv_co_copy_on_readv(bs
, sector_num
, nb_sectors
);
1045 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
,
1046 BDRV_REQ_COPY_ON_READ
);
1049 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1051 static int coroutine_fn
bdrv_co_do_write_zeroes(BlockDriverState
*bs
,
1052 int64_t sector_num
, int nb_sectors
, BdrvRequestFlags flags
)
1054 BlockDriver
*drv
= bs
->drv
;
1056 struct iovec iov
= {0};
1059 int max_write_zeroes
= MIN_NON_ZERO(bs
->bl
.max_write_zeroes
,
1060 BDRV_REQUEST_MAX_SECTORS
);
1062 while (nb_sectors
> 0 && !ret
) {
1063 int num
= nb_sectors
;
1065 /* Align request. Block drivers can expect the "bulk" of the request
1068 if (bs
->bl
.write_zeroes_alignment
1069 && num
> bs
->bl
.write_zeroes_alignment
) {
1070 if (sector_num
% bs
->bl
.write_zeroes_alignment
!= 0) {
1071 /* Make a small request up to the first aligned sector. */
1072 num
= bs
->bl
.write_zeroes_alignment
;
1073 num
-= sector_num
% bs
->bl
.write_zeroes_alignment
;
1074 } else if ((sector_num
+ num
) % bs
->bl
.write_zeroes_alignment
!= 0) {
1075 /* Shorten the request to the last aligned sector. num cannot
1076 * underflow because num > bs->bl.write_zeroes_alignment.
1078 num
-= (sector_num
+ num
) % bs
->bl
.write_zeroes_alignment
;
1082 /* limit request size */
1083 if (num
> max_write_zeroes
) {
1084 num
= max_write_zeroes
;
1088 /* First try the efficient write zeroes operation */
1089 if (drv
->bdrv_co_write_zeroes
) {
1090 ret
= drv
->bdrv_co_write_zeroes(bs
, sector_num
, num
, flags
);
1093 if (ret
== -ENOTSUP
) {
1094 /* Fall back to bounce buffer if write zeroes is unsupported */
1095 int max_xfer_len
= MIN_NON_ZERO(bs
->bl
.max_transfer_length
,
1096 MAX_WRITE_ZEROES_BOUNCE_BUFFER
);
1097 num
= MIN(num
, max_xfer_len
);
1098 iov
.iov_len
= num
* BDRV_SECTOR_SIZE
;
1099 if (iov
.iov_base
== NULL
) {
1100 iov
.iov_base
= qemu_try_blockalign(bs
, num
* BDRV_SECTOR_SIZE
);
1101 if (iov
.iov_base
== NULL
) {
1105 memset(iov
.iov_base
, 0, num
* BDRV_SECTOR_SIZE
);
1107 qemu_iovec_init_external(&qiov
, &iov
, 1);
1109 ret
= drv
->bdrv_co_writev(bs
, sector_num
, num
, &qiov
);
1111 /* Keep bounce buffer around if it is big enough for all
1112 * all future requests.
1114 if (num
< max_xfer_len
) {
1115 qemu_vfree(iov
.iov_base
);
1116 iov
.iov_base
= NULL
;
1125 qemu_vfree(iov
.iov_base
);
1130 * Forwards an already correctly aligned write request to the BlockDriver.
1132 static int coroutine_fn
bdrv_aligned_pwritev(BlockDriverState
*bs
,
1133 BdrvTrackedRequest
*req
, int64_t offset
, unsigned int bytes
,
1134 QEMUIOVector
*qiov
, int flags
)
1136 BlockDriver
*drv
= bs
->drv
;
1140 int64_t sector_num
= offset
>> BDRV_SECTOR_BITS
;
1141 unsigned int nb_sectors
= bytes
>> BDRV_SECTOR_BITS
;
1143 assert((offset
& (BDRV_SECTOR_SIZE
- 1)) == 0);
1144 assert((bytes
& (BDRV_SECTOR_SIZE
- 1)) == 0);
1145 assert(!qiov
|| bytes
== qiov
->size
);
1147 waited
= wait_serialising_requests(req
);
1148 assert(!waited
|| !req
->serialising
);
1149 assert(req
->overlap_offset
<= offset
);
1150 assert(offset
+ bytes
<= req
->overlap_offset
+ req
->overlap_bytes
);
1152 ret
= notifier_with_return_list_notify(&bs
->before_write_notifiers
, req
);
1154 if (!ret
&& bs
->detect_zeroes
!= BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF
&&
1155 !(flags
& BDRV_REQ_ZERO_WRITE
) && drv
->bdrv_co_write_zeroes
&&
1156 qemu_iovec_is_zero(qiov
)) {
1157 flags
|= BDRV_REQ_ZERO_WRITE
;
1158 if (bs
->detect_zeroes
== BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP
) {
1159 flags
|= BDRV_REQ_MAY_UNMAP
;
1164 /* Do nothing, write notifier decided to fail this request */
1165 } else if (flags
& BDRV_REQ_ZERO_WRITE
) {
1166 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_ZERO
);
1167 ret
= bdrv_co_do_write_zeroes(bs
, sector_num
, nb_sectors
, flags
);
1169 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV
);
1170 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, qiov
);
1172 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_DONE
);
1174 if (ret
== 0 && !bs
->enable_write_cache
) {
1175 ret
= bdrv_co_flush(bs
);
1178 bdrv_set_dirty(bs
, sector_num
, nb_sectors
);
1180 block_acct_highest_sector(&bs
->stats
, sector_num
, nb_sectors
);
1183 bs
->total_sectors
= MAX(bs
->total_sectors
, sector_num
+ nb_sectors
);
1190 * Handle a write request in coroutine context
1192 static int coroutine_fn
bdrv_co_do_pwritev(BlockDriverState
*bs
,
1193 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
1194 BdrvRequestFlags flags
)
1196 BdrvTrackedRequest req
;
1197 uint64_t align
= bdrv_get_align(bs
);
1198 uint8_t *head_buf
= NULL
;
1199 uint8_t *tail_buf
= NULL
;
1200 QEMUIOVector local_qiov
;
1201 bool use_local_qiov
= false;
1207 if (bs
->read_only
) {
1211 ret
= bdrv_check_byte_request(bs
, offset
, bytes
);
1216 /* throttling disk I/O */
1217 if (bs
->io_limits_enabled
) {
1218 bdrv_io_limits_intercept(bs
, bytes
, true);
1222 * Align write if necessary by performing a read-modify-write cycle.
1223 * Pad qiov with the read parts and be sure to have a tracked request not
1224 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1226 tracked_request_begin(&req
, bs
, offset
, bytes
, true);
1228 if (offset
& (align
- 1)) {
1229 QEMUIOVector head_qiov
;
1230 struct iovec head_iov
;
1232 mark_request_serialising(&req
, align
);
1233 wait_serialising_requests(&req
);
1235 head_buf
= qemu_blockalign(bs
, align
);
1236 head_iov
= (struct iovec
) {
1237 .iov_base
= head_buf
,
1240 qemu_iovec_init_external(&head_qiov
, &head_iov
, 1);
1242 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_HEAD
);
1243 ret
= bdrv_aligned_preadv(bs
, &req
, offset
& ~(align
- 1), align
,
1244 align
, &head_qiov
, 0);
1248 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_AFTER_HEAD
);
1250 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 2);
1251 qemu_iovec_add(&local_qiov
, head_buf
, offset
& (align
- 1));
1252 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1253 use_local_qiov
= true;
1255 bytes
+= offset
& (align
- 1);
1256 offset
= offset
& ~(align
- 1);
1259 if ((offset
+ bytes
) & (align
- 1)) {
1260 QEMUIOVector tail_qiov
;
1261 struct iovec tail_iov
;
1265 mark_request_serialising(&req
, align
);
1266 waited
= wait_serialising_requests(&req
);
1267 assert(!waited
|| !use_local_qiov
);
1269 tail_buf
= qemu_blockalign(bs
, align
);
1270 tail_iov
= (struct iovec
) {
1271 .iov_base
= tail_buf
,
1274 qemu_iovec_init_external(&tail_qiov
, &tail_iov
, 1);
1276 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_TAIL
);
1277 ret
= bdrv_aligned_preadv(bs
, &req
, (offset
+ bytes
) & ~(align
- 1), align
,
1278 align
, &tail_qiov
, 0);
1282 BLKDBG_EVENT(bs
, BLKDBG_PWRITEV_RMW_AFTER_TAIL
);
1284 if (!use_local_qiov
) {
1285 qemu_iovec_init(&local_qiov
, qiov
->niov
+ 1);
1286 qemu_iovec_concat(&local_qiov
, qiov
, 0, qiov
->size
);
1287 use_local_qiov
= true;
1290 tail_bytes
= (offset
+ bytes
) & (align
- 1);
1291 qemu_iovec_add(&local_qiov
, tail_buf
+ tail_bytes
, align
- tail_bytes
);
1293 bytes
= ROUND_UP(bytes
, align
);
1296 if (use_local_qiov
) {
1297 /* Local buffer may have non-zero data. */
1298 flags
&= ~BDRV_REQ_ZERO_WRITE
;
1300 ret
= bdrv_aligned_pwritev(bs
, &req
, offset
, bytes
,
1301 use_local_qiov
? &local_qiov
: qiov
,
1305 tracked_request_end(&req
);
1307 if (use_local_qiov
) {
1308 qemu_iovec_destroy(&local_qiov
);
1310 qemu_vfree(head_buf
);
1311 qemu_vfree(tail_buf
);
1316 static int coroutine_fn
bdrv_co_do_writev(BlockDriverState
*bs
,
1317 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1318 BdrvRequestFlags flags
)
1320 if (nb_sectors
< 0 || nb_sectors
> BDRV_REQUEST_MAX_SECTORS
) {
1324 return bdrv_co_do_pwritev(bs
, sector_num
<< BDRV_SECTOR_BITS
,
1325 nb_sectors
<< BDRV_SECTOR_BITS
, qiov
, flags
);
1328 int coroutine_fn
bdrv_co_writev(BlockDriverState
*bs
, int64_t sector_num
,
1329 int nb_sectors
, QEMUIOVector
*qiov
)
1331 trace_bdrv_co_writev(bs
, sector_num
, nb_sectors
);
1333 return bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, qiov
, 0);
1336 int coroutine_fn
bdrv_co_write_zeroes(BlockDriverState
*bs
,
1337 int64_t sector_num
, int nb_sectors
,
1338 BdrvRequestFlags flags
)
1342 trace_bdrv_co_write_zeroes(bs
, sector_num
, nb_sectors
, flags
);
1344 if (!(bs
->open_flags
& BDRV_O_UNMAP
)) {
1345 flags
&= ~BDRV_REQ_MAY_UNMAP
;
1347 if (bdrv_req_is_aligned(bs
, sector_num
<< BDRV_SECTOR_BITS
,
1348 nb_sectors
<< BDRV_SECTOR_BITS
)) {
1349 ret
= bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, NULL
,
1350 BDRV_REQ_ZERO_WRITE
| flags
);
1353 QEMUIOVector local_qiov
;
1354 size_t bytes
= nb_sectors
<< BDRV_SECTOR_BITS
;
1356 buf
= qemu_memalign(bdrv_opt_mem_align(bs
), bytes
);
1357 memset(buf
, 0, bytes
);
1358 qemu_iovec_init(&local_qiov
, 1);
1359 qemu_iovec_add(&local_qiov
, buf
, bytes
);
1361 ret
= bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, &local_qiov
,
1362 BDRV_REQ_ZERO_WRITE
| flags
);
1368 int bdrv_flush_all(void)
1370 BlockDriverState
*bs
= NULL
;
1373 while ((bs
= bdrv_next(bs
))) {
1374 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
1377 aio_context_acquire(aio_context
);
1378 ret
= bdrv_flush(bs
);
1379 if (ret
< 0 && !result
) {
1382 aio_context_release(aio_context
);
1388 typedef struct BdrvCoGetBlockStatusData
{
1389 BlockDriverState
*bs
;
1390 BlockDriverState
*base
;
1396 } BdrvCoGetBlockStatusData
;
1399 * Returns the allocation status of the specified sectors.
1400 * Drivers not implementing the functionality are assumed to not support
1401 * backing files, hence all their sectors are reported as allocated.
1403 * If 'sector_num' is beyond the end of the disk image the return value is 0
1404 * and 'pnum' is set to 0.
1406 * 'pnum' is set to the number of sectors (including and immediately following
1407 * the specified sector) that are known to be in the same
1408 * allocated/unallocated state.
1410 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
1411 * beyond the end of the disk image it will be clamped.
1413 static int64_t coroutine_fn
bdrv_co_get_block_status(BlockDriverState
*bs
,
1415 int nb_sectors
, int *pnum
)
1417 int64_t total_sectors
;
1421 total_sectors
= bdrv_nb_sectors(bs
);
1422 if (total_sectors
< 0) {
1423 return total_sectors
;
1426 if (sector_num
>= total_sectors
) {
1431 n
= total_sectors
- sector_num
;
1432 if (n
< nb_sectors
) {
1436 if (!bs
->drv
->bdrv_co_get_block_status
) {
1438 ret
= BDRV_BLOCK_DATA
| BDRV_BLOCK_ALLOCATED
;
1439 if (bs
->drv
->protocol_name
) {
1440 ret
|= BDRV_BLOCK_OFFSET_VALID
| (sector_num
* BDRV_SECTOR_SIZE
);
1445 ret
= bs
->drv
->bdrv_co_get_block_status(bs
, sector_num
, nb_sectors
, pnum
);
1451 if (ret
& BDRV_BLOCK_RAW
) {
1452 assert(ret
& BDRV_BLOCK_OFFSET_VALID
);
1453 return bdrv_get_block_status(bs
->file
, ret
>> BDRV_SECTOR_BITS
,
1457 if (ret
& (BDRV_BLOCK_DATA
| BDRV_BLOCK_ZERO
)) {
1458 ret
|= BDRV_BLOCK_ALLOCATED
;
1461 if (!(ret
& BDRV_BLOCK_DATA
) && !(ret
& BDRV_BLOCK_ZERO
)) {
1462 if (bdrv_unallocated_blocks_are_zero(bs
)) {
1463 ret
|= BDRV_BLOCK_ZERO
;
1464 } else if (bs
->backing_hd
) {
1465 BlockDriverState
*bs2
= bs
->backing_hd
;
1466 int64_t nb_sectors2
= bdrv_nb_sectors(bs2
);
1467 if (nb_sectors2
>= 0 && sector_num
>= nb_sectors2
) {
1468 ret
|= BDRV_BLOCK_ZERO
;
1474 (ret
& BDRV_BLOCK_DATA
) && !(ret
& BDRV_BLOCK_ZERO
) &&
1475 (ret
& BDRV_BLOCK_OFFSET_VALID
)) {
1478 ret2
= bdrv_co_get_block_status(bs
->file
, ret
>> BDRV_SECTOR_BITS
,
1481 /* Ignore errors. This is just providing extra information, it
1482 * is useful but not necessary.
1485 /* !file_pnum indicates an offset at or beyond the EOF; it is
1486 * perfectly valid for the format block driver to point to such
1487 * offsets, so catch it and mark everything as zero */
1488 ret
|= BDRV_BLOCK_ZERO
;
1490 /* Limit request to the range reported by the protocol driver */
1492 ret
|= (ret2
& BDRV_BLOCK_ZERO
);
1500 /* Coroutine wrapper for bdrv_get_block_status() */
1501 static void coroutine_fn
bdrv_get_block_status_co_entry(void *opaque
)
1503 BdrvCoGetBlockStatusData
*data
= opaque
;
1504 BlockDriverState
*bs
= data
->bs
;
1506 data
->ret
= bdrv_co_get_block_status(bs
, data
->sector_num
, data
->nb_sectors
,
1512 * Synchronous wrapper around bdrv_co_get_block_status().
1514 * See bdrv_co_get_block_status() for details.
1516 int64_t bdrv_get_block_status(BlockDriverState
*bs
, int64_t sector_num
,
1517 int nb_sectors
, int *pnum
)
1520 BdrvCoGetBlockStatusData data
= {
1522 .sector_num
= sector_num
,
1523 .nb_sectors
= nb_sectors
,
1528 if (qemu_in_coroutine()) {
1529 /* Fast-path if already in coroutine context */
1530 bdrv_get_block_status_co_entry(&data
);
1532 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
1534 co
= qemu_coroutine_create(bdrv_get_block_status_co_entry
);
1535 qemu_coroutine_enter(co
, &data
);
1536 while (!data
.done
) {
1537 aio_poll(aio_context
, true);
1543 int coroutine_fn
bdrv_is_allocated(BlockDriverState
*bs
, int64_t sector_num
,
1544 int nb_sectors
, int *pnum
)
1546 int64_t ret
= bdrv_get_block_status(bs
, sector_num
, nb_sectors
, pnum
);
1550 return !!(ret
& BDRV_BLOCK_ALLOCATED
);
1554 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1556 * Return true if the given sector is allocated in any image between
1557 * BASE and TOP (inclusive). BASE can be NULL to check if the given
1558 * sector is allocated in any image of the chain. Return false otherwise.
1560 * 'pnum' is set to the number of sectors (including and immediately following
1561 * the specified sector) that are known to be in the same
1562 * allocated/unallocated state.
1565 int bdrv_is_allocated_above(BlockDriverState
*top
,
1566 BlockDriverState
*base
,
1568 int nb_sectors
, int *pnum
)
1570 BlockDriverState
*intermediate
;
1571 int ret
, n
= nb_sectors
;
1574 while (intermediate
&& intermediate
!= base
) {
1576 ret
= bdrv_is_allocated(intermediate
, sector_num
, nb_sectors
,
1586 * [sector_num, nb_sectors] is unallocated on top but intermediate
1589 * [sector_num+x, nr_sectors] allocated.
1591 if (n
> pnum_inter
&&
1592 (intermediate
== top
||
1593 sector_num
+ pnum_inter
< intermediate
->total_sectors
)) {
1597 intermediate
= intermediate
->backing_hd
;
1604 int bdrv_write_compressed(BlockDriverState
*bs
, int64_t sector_num
,
1605 const uint8_t *buf
, int nb_sectors
)
1607 BlockDriver
*drv
= bs
->drv
;
1613 if (!drv
->bdrv_write_compressed
) {
1616 ret
= bdrv_check_request(bs
, sector_num
, nb_sectors
);
1621 assert(QLIST_EMPTY(&bs
->dirty_bitmaps
));
1623 return drv
->bdrv_write_compressed(bs
, sector_num
, buf
, nb_sectors
);
1626 int bdrv_save_vmstate(BlockDriverState
*bs
, const uint8_t *buf
,
1627 int64_t pos
, int size
)
1630 struct iovec iov
= {
1631 .iov_base
= (void *) buf
,
1635 qemu_iovec_init_external(&qiov
, &iov
, 1);
1636 return bdrv_writev_vmstate(bs
, &qiov
, pos
);
1639 int bdrv_writev_vmstate(BlockDriverState
*bs
, QEMUIOVector
*qiov
, int64_t pos
)
1641 BlockDriver
*drv
= bs
->drv
;
1645 } else if (drv
->bdrv_save_vmstate
) {
1646 return drv
->bdrv_save_vmstate(bs
, qiov
, pos
);
1647 } else if (bs
->file
) {
1648 return bdrv_writev_vmstate(bs
->file
, qiov
, pos
);
1654 int bdrv_load_vmstate(BlockDriverState
*bs
, uint8_t *buf
,
1655 int64_t pos
, int size
)
1657 BlockDriver
*drv
= bs
->drv
;
1660 if (drv
->bdrv_load_vmstate
)
1661 return drv
->bdrv_load_vmstate(bs
, buf
, pos
, size
);
1663 return bdrv_load_vmstate(bs
->file
, buf
, pos
, size
);
1667 /**************************************************************/
1670 BlockAIOCB
*bdrv_aio_readv(BlockDriverState
*bs
, int64_t sector_num
,
1671 QEMUIOVector
*qiov
, int nb_sectors
,
1672 BlockCompletionFunc
*cb
, void *opaque
)
1674 trace_bdrv_aio_readv(bs
, sector_num
, nb_sectors
, opaque
);
1676 return bdrv_co_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, 0,
1680 BlockAIOCB
*bdrv_aio_writev(BlockDriverState
*bs
, int64_t sector_num
,
1681 QEMUIOVector
*qiov
, int nb_sectors
,
1682 BlockCompletionFunc
*cb
, void *opaque
)
1684 trace_bdrv_aio_writev(bs
, sector_num
, nb_sectors
, opaque
);
1686 return bdrv_co_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, 0,
1690 BlockAIOCB
*bdrv_aio_write_zeroes(BlockDriverState
*bs
,
1691 int64_t sector_num
, int nb_sectors
, BdrvRequestFlags flags
,
1692 BlockCompletionFunc
*cb
, void *opaque
)
1694 trace_bdrv_aio_write_zeroes(bs
, sector_num
, nb_sectors
, flags
, opaque
);
1696 return bdrv_co_aio_rw_vector(bs
, sector_num
, NULL
, nb_sectors
,
1697 BDRV_REQ_ZERO_WRITE
| flags
,
1702 typedef struct MultiwriteCB
{
1707 BlockCompletionFunc
*cb
;
1709 QEMUIOVector
*free_qiov
;
1713 static void multiwrite_user_cb(MultiwriteCB
*mcb
)
1717 for (i
= 0; i
< mcb
->num_callbacks
; i
++) {
1718 mcb
->callbacks
[i
].cb(mcb
->callbacks
[i
].opaque
, mcb
->error
);
1719 if (mcb
->callbacks
[i
].free_qiov
) {
1720 qemu_iovec_destroy(mcb
->callbacks
[i
].free_qiov
);
1722 g_free(mcb
->callbacks
[i
].free_qiov
);
1726 static void multiwrite_cb(void *opaque
, int ret
)
1728 MultiwriteCB
*mcb
= opaque
;
1730 trace_multiwrite_cb(mcb
, ret
);
1732 if (ret
< 0 && !mcb
->error
) {
1736 mcb
->num_requests
--;
1737 if (mcb
->num_requests
== 0) {
1738 multiwrite_user_cb(mcb
);
1743 static int multiwrite_req_compare(const void *a
, const void *b
)
1745 const BlockRequest
*req1
= a
, *req2
= b
;
1748 * Note that we can't simply subtract req2->sector from req1->sector
1749 * here as that could overflow the return value.
1751 if (req1
->sector
> req2
->sector
) {
1753 } else if (req1
->sector
< req2
->sector
) {
1761 * Takes a bunch of requests and tries to merge them. Returns the number of
1762 * requests that remain after merging.
1764 static int multiwrite_merge(BlockDriverState
*bs
, BlockRequest
*reqs
,
1765 int num_reqs
, MultiwriteCB
*mcb
)
1769 // Sort requests by start sector
1770 qsort(reqs
, num_reqs
, sizeof(*reqs
), &multiwrite_req_compare
);
1772 // Check if adjacent requests touch the same clusters. If so, combine them,
1773 // filling up gaps with zero sectors.
1775 for (i
= 1; i
< num_reqs
; i
++) {
1777 int64_t oldreq_last
= reqs
[outidx
].sector
+ reqs
[outidx
].nb_sectors
;
1779 // Handle exactly sequential writes and overlapping writes.
1780 if (reqs
[i
].sector
<= oldreq_last
) {
1784 if (reqs
[outidx
].qiov
->niov
+ reqs
[i
].qiov
->niov
+ 1 > IOV_MAX
) {
1788 if (bs
->bl
.max_transfer_length
&& reqs
[outidx
].nb_sectors
+
1789 reqs
[i
].nb_sectors
> bs
->bl
.max_transfer_length
) {
1795 QEMUIOVector
*qiov
= g_malloc0(sizeof(*qiov
));
1796 qemu_iovec_init(qiov
,
1797 reqs
[outidx
].qiov
->niov
+ reqs
[i
].qiov
->niov
+ 1);
1799 // Add the first request to the merged one. If the requests are
1800 // overlapping, drop the last sectors of the first request.
1801 size
= (reqs
[i
].sector
- reqs
[outidx
].sector
) << 9;
1802 qemu_iovec_concat(qiov
, reqs
[outidx
].qiov
, 0, size
);
1804 // We should need to add any zeros between the two requests
1805 assert (reqs
[i
].sector
<= oldreq_last
);
1807 // Add the second request
1808 qemu_iovec_concat(qiov
, reqs
[i
].qiov
, 0, reqs
[i
].qiov
->size
);
1810 // Add tail of first request, if necessary
1811 if (qiov
->size
< reqs
[outidx
].qiov
->size
) {
1812 qemu_iovec_concat(qiov
, reqs
[outidx
].qiov
, qiov
->size
,
1813 reqs
[outidx
].qiov
->size
- qiov
->size
);
1816 reqs
[outidx
].nb_sectors
= qiov
->size
>> 9;
1817 reqs
[outidx
].qiov
= qiov
;
1819 mcb
->callbacks
[i
].free_qiov
= reqs
[outidx
].qiov
;
1822 reqs
[outidx
].sector
= reqs
[i
].sector
;
1823 reqs
[outidx
].nb_sectors
= reqs
[i
].nb_sectors
;
1824 reqs
[outidx
].qiov
= reqs
[i
].qiov
;
1828 block_acct_merge_done(&bs
->stats
, BLOCK_ACCT_WRITE
, num_reqs
- outidx
- 1);
1834 * Submit multiple AIO write requests at once.
1836 * On success, the function returns 0 and all requests in the reqs array have
1837 * been submitted. In error case this function returns -1, and any of the
1838 * requests may or may not be submitted yet. In particular, this means that the
1839 * callback will be called for some of the requests, for others it won't. The
1840 * caller must check the error field of the BlockRequest to wait for the right
1841 * callbacks (if error != 0, no callback will be called).
1843 * The implementation may modify the contents of the reqs array, e.g. to merge
1844 * requests. However, the fields opaque and error are left unmodified as they
1845 * are used to signal failure for a single request to the caller.
1847 int bdrv_aio_multiwrite(BlockDriverState
*bs
, BlockRequest
*reqs
, int num_reqs
)
1852 /* don't submit writes if we don't have a medium */
1853 if (bs
->drv
== NULL
) {
1854 for (i
= 0; i
< num_reqs
; i
++) {
1855 reqs
[i
].error
= -ENOMEDIUM
;
1860 if (num_reqs
== 0) {
1864 // Create MultiwriteCB structure
1865 mcb
= g_malloc0(sizeof(*mcb
) + num_reqs
* sizeof(*mcb
->callbacks
));
1866 mcb
->num_requests
= 0;
1867 mcb
->num_callbacks
= num_reqs
;
1869 for (i
= 0; i
< num_reqs
; i
++) {
1870 mcb
->callbacks
[i
].cb
= reqs
[i
].cb
;
1871 mcb
->callbacks
[i
].opaque
= reqs
[i
].opaque
;
1874 // Check for mergable requests
1875 num_reqs
= multiwrite_merge(bs
, reqs
, num_reqs
, mcb
);
1877 trace_bdrv_aio_multiwrite(mcb
, mcb
->num_callbacks
, num_reqs
);
1879 /* Run the aio requests. */
1880 mcb
->num_requests
= num_reqs
;
1881 for (i
= 0; i
< num_reqs
; i
++) {
1882 bdrv_co_aio_rw_vector(bs
, reqs
[i
].sector
, reqs
[i
].qiov
,
1883 reqs
[i
].nb_sectors
, reqs
[i
].flags
,
1891 void bdrv_aio_cancel(BlockAIOCB
*acb
)
1894 bdrv_aio_cancel_async(acb
);
1895 while (acb
->refcnt
> 1) {
1896 if (acb
->aiocb_info
->get_aio_context
) {
1897 aio_poll(acb
->aiocb_info
->get_aio_context(acb
), true);
1898 } else if (acb
->bs
) {
1899 aio_poll(bdrv_get_aio_context(acb
->bs
), true);
1904 qemu_aio_unref(acb
);
1907 /* Async version of aio cancel. The caller is not blocked if the acb implements
1908 * cancel_async, otherwise we do nothing and let the request normally complete.
1909 * In either case the completion callback must be called. */
1910 void bdrv_aio_cancel_async(BlockAIOCB
*acb
)
1912 if (acb
->aiocb_info
->cancel_async
) {
1913 acb
->aiocb_info
->cancel_async(acb
);
1917 /**************************************************************/
1918 /* async block device emulation */
1920 typedef struct BlockAIOCBSync
{
1924 /* vector translation state */
1930 static const AIOCBInfo bdrv_em_aiocb_info
= {
1931 .aiocb_size
= sizeof(BlockAIOCBSync
),
1934 static void bdrv_aio_bh_cb(void *opaque
)
1936 BlockAIOCBSync
*acb
= opaque
;
1938 if (!acb
->is_write
&& acb
->ret
>= 0) {
1939 qemu_iovec_from_buf(acb
->qiov
, 0, acb
->bounce
, acb
->qiov
->size
);
1941 qemu_vfree(acb
->bounce
);
1942 acb
->common
.cb(acb
->common
.opaque
, acb
->ret
);
1943 qemu_bh_delete(acb
->bh
);
1945 qemu_aio_unref(acb
);
1948 static BlockAIOCB
*bdrv_aio_rw_vector(BlockDriverState
*bs
,
1952 BlockCompletionFunc
*cb
,
1957 BlockAIOCBSync
*acb
;
1959 acb
= qemu_aio_get(&bdrv_em_aiocb_info
, bs
, cb
, opaque
);
1960 acb
->is_write
= is_write
;
1962 acb
->bounce
= qemu_try_blockalign(bs
, qiov
->size
);
1963 acb
->bh
= aio_bh_new(bdrv_get_aio_context(bs
), bdrv_aio_bh_cb
, acb
);
1965 if (acb
->bounce
== NULL
) {
1967 } else if (is_write
) {
1968 qemu_iovec_to_buf(acb
->qiov
, 0, acb
->bounce
, qiov
->size
);
1969 acb
->ret
= bs
->drv
->bdrv_write(bs
, sector_num
, acb
->bounce
, nb_sectors
);
1971 acb
->ret
= bs
->drv
->bdrv_read(bs
, sector_num
, acb
->bounce
, nb_sectors
);
1974 qemu_bh_schedule(acb
->bh
);
1976 return &acb
->common
;
1979 static BlockAIOCB
*bdrv_aio_readv_em(BlockDriverState
*bs
,
1980 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
1981 BlockCompletionFunc
*cb
, void *opaque
)
1983 return bdrv_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, cb
, opaque
, 0);
1986 static BlockAIOCB
*bdrv_aio_writev_em(BlockDriverState
*bs
,
1987 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
1988 BlockCompletionFunc
*cb
, void *opaque
)
1990 return bdrv_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, cb
, opaque
, 1);
1994 typedef struct BlockAIOCBCoroutine
{
2001 } BlockAIOCBCoroutine
;
2003 static const AIOCBInfo bdrv_em_co_aiocb_info
= {
2004 .aiocb_size
= sizeof(BlockAIOCBCoroutine
),
2007 static void bdrv_co_complete(BlockAIOCBCoroutine
*acb
)
2009 if (!acb
->need_bh
) {
2010 acb
->common
.cb(acb
->common
.opaque
, acb
->req
.error
);
2011 qemu_aio_unref(acb
);
2015 static void bdrv_co_em_bh(void *opaque
)
2017 BlockAIOCBCoroutine
*acb
= opaque
;
2019 assert(!acb
->need_bh
);
2020 qemu_bh_delete(acb
->bh
);
2021 bdrv_co_complete(acb
);
2024 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine
*acb
)
2026 acb
->need_bh
= false;
2027 if (acb
->req
.error
!= -EINPROGRESS
) {
2028 BlockDriverState
*bs
= acb
->common
.bs
;
2030 acb
->bh
= aio_bh_new(bdrv_get_aio_context(bs
), bdrv_co_em_bh
, acb
);
2031 qemu_bh_schedule(acb
->bh
);
2035 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2036 static void coroutine_fn
bdrv_co_do_rw(void *opaque
)
2038 BlockAIOCBCoroutine
*acb
= opaque
;
2039 BlockDriverState
*bs
= acb
->common
.bs
;
2041 if (!acb
->is_write
) {
2042 acb
->req
.error
= bdrv_co_do_readv(bs
, acb
->req
.sector
,
2043 acb
->req
.nb_sectors
, acb
->req
.qiov
, acb
->req
.flags
);
2045 acb
->req
.error
= bdrv_co_do_writev(bs
, acb
->req
.sector
,
2046 acb
->req
.nb_sectors
, acb
->req
.qiov
, acb
->req
.flags
);
2049 bdrv_co_complete(acb
);
2052 static BlockAIOCB
*bdrv_co_aio_rw_vector(BlockDriverState
*bs
,
2056 BdrvRequestFlags flags
,
2057 BlockCompletionFunc
*cb
,
2062 BlockAIOCBCoroutine
*acb
;
2064 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
2065 acb
->need_bh
= true;
2066 acb
->req
.error
= -EINPROGRESS
;
2067 acb
->req
.sector
= sector_num
;
2068 acb
->req
.nb_sectors
= nb_sectors
;
2069 acb
->req
.qiov
= qiov
;
2070 acb
->req
.flags
= flags
;
2071 acb
->is_write
= is_write
;
2073 co
= qemu_coroutine_create(bdrv_co_do_rw
);
2074 qemu_coroutine_enter(co
, acb
);
2076 bdrv_co_maybe_schedule_bh(acb
);
2077 return &acb
->common
;
2080 static void coroutine_fn
bdrv_aio_flush_co_entry(void *opaque
)
2082 BlockAIOCBCoroutine
*acb
= opaque
;
2083 BlockDriverState
*bs
= acb
->common
.bs
;
2085 acb
->req
.error
= bdrv_co_flush(bs
);
2086 bdrv_co_complete(acb
);
2089 BlockAIOCB
*bdrv_aio_flush(BlockDriverState
*bs
,
2090 BlockCompletionFunc
*cb
, void *opaque
)
2092 trace_bdrv_aio_flush(bs
, opaque
);
2095 BlockAIOCBCoroutine
*acb
;
2097 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
2098 acb
->need_bh
= true;
2099 acb
->req
.error
= -EINPROGRESS
;
2101 co
= qemu_coroutine_create(bdrv_aio_flush_co_entry
);
2102 qemu_coroutine_enter(co
, acb
);
2104 bdrv_co_maybe_schedule_bh(acb
);
2105 return &acb
->common
;
2108 static void coroutine_fn
bdrv_aio_discard_co_entry(void *opaque
)
2110 BlockAIOCBCoroutine
*acb
= opaque
;
2111 BlockDriverState
*bs
= acb
->common
.bs
;
2113 acb
->req
.error
= bdrv_co_discard(bs
, acb
->req
.sector
, acb
->req
.nb_sectors
);
2114 bdrv_co_complete(acb
);
2117 BlockAIOCB
*bdrv_aio_discard(BlockDriverState
*bs
,
2118 int64_t sector_num
, int nb_sectors
,
2119 BlockCompletionFunc
*cb
, void *opaque
)
2122 BlockAIOCBCoroutine
*acb
;
2124 trace_bdrv_aio_discard(bs
, sector_num
, nb_sectors
, opaque
);
2126 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
2127 acb
->need_bh
= true;
2128 acb
->req
.error
= -EINPROGRESS
;
2129 acb
->req
.sector
= sector_num
;
2130 acb
->req
.nb_sectors
= nb_sectors
;
2131 co
= qemu_coroutine_create(bdrv_aio_discard_co_entry
);
2132 qemu_coroutine_enter(co
, acb
);
2134 bdrv_co_maybe_schedule_bh(acb
);
2135 return &acb
->common
;
2138 void *qemu_aio_get(const AIOCBInfo
*aiocb_info
, BlockDriverState
*bs
,
2139 BlockCompletionFunc
*cb
, void *opaque
)
2143 acb
= g_slice_alloc(aiocb_info
->aiocb_size
);
2144 acb
->aiocb_info
= aiocb_info
;
2147 acb
->opaque
= opaque
;
2152 void qemu_aio_ref(void *p
)
2154 BlockAIOCB
*acb
= p
;
2158 void qemu_aio_unref(void *p
)
2160 BlockAIOCB
*acb
= p
;
2161 assert(acb
->refcnt
> 0);
2162 if (--acb
->refcnt
== 0) {
2163 g_slice_free1(acb
->aiocb_info
->aiocb_size
, acb
);
2167 /**************************************************************/
2168 /* Coroutine block device emulation */
2170 typedef struct CoroutineIOCompletion
{
2171 Coroutine
*coroutine
;
2173 } CoroutineIOCompletion
;
2175 static void bdrv_co_io_em_complete(void *opaque
, int ret
)
2177 CoroutineIOCompletion
*co
= opaque
;
2180 qemu_coroutine_enter(co
->coroutine
, NULL
);
2183 static int coroutine_fn
bdrv_co_io_em(BlockDriverState
*bs
, int64_t sector_num
,
2184 int nb_sectors
, QEMUIOVector
*iov
,
2187 CoroutineIOCompletion co
= {
2188 .coroutine
= qemu_coroutine_self(),
2193 acb
= bs
->drv
->bdrv_aio_writev(bs
, sector_num
, iov
, nb_sectors
,
2194 bdrv_co_io_em_complete
, &co
);
2196 acb
= bs
->drv
->bdrv_aio_readv(bs
, sector_num
, iov
, nb_sectors
,
2197 bdrv_co_io_em_complete
, &co
);
2200 trace_bdrv_co_io_em(bs
, sector_num
, nb_sectors
, is_write
, acb
);
2204 qemu_coroutine_yield();
2209 static int coroutine_fn
bdrv_co_readv_em(BlockDriverState
*bs
,
2210 int64_t sector_num
, int nb_sectors
,
2213 return bdrv_co_io_em(bs
, sector_num
, nb_sectors
, iov
, false);
2216 static int coroutine_fn
bdrv_co_writev_em(BlockDriverState
*bs
,
2217 int64_t sector_num
, int nb_sectors
,
2220 return bdrv_co_io_em(bs
, sector_num
, nb_sectors
, iov
, true);
2223 static void coroutine_fn
bdrv_flush_co_entry(void *opaque
)
2225 RwCo
*rwco
= opaque
;
2227 rwco
->ret
= bdrv_co_flush(rwco
->bs
);
2230 int coroutine_fn
bdrv_co_flush(BlockDriverState
*bs
)
2234 if (!bs
|| !bdrv_is_inserted(bs
) || bdrv_is_read_only(bs
)) {
2238 /* Write back cached data to the OS even with cache=unsafe */
2239 BLKDBG_EVENT(bs
->file
, BLKDBG_FLUSH_TO_OS
);
2240 if (bs
->drv
->bdrv_co_flush_to_os
) {
2241 ret
= bs
->drv
->bdrv_co_flush_to_os(bs
);
2247 /* But don't actually force it to the disk with cache=unsafe */
2248 if (bs
->open_flags
& BDRV_O_NO_FLUSH
) {
2252 BLKDBG_EVENT(bs
->file
, BLKDBG_FLUSH_TO_DISK
);
2253 if (bs
->drv
->bdrv_co_flush_to_disk
) {
2254 ret
= bs
->drv
->bdrv_co_flush_to_disk(bs
);
2255 } else if (bs
->drv
->bdrv_aio_flush
) {
2257 CoroutineIOCompletion co
= {
2258 .coroutine
= qemu_coroutine_self(),
2261 acb
= bs
->drv
->bdrv_aio_flush(bs
, bdrv_co_io_em_complete
, &co
);
2265 qemu_coroutine_yield();
2270 * Some block drivers always operate in either writethrough or unsafe
2271 * mode and don't support bdrv_flush therefore. Usually qemu doesn't
2272 * know how the server works (because the behaviour is hardcoded or
2273 * depends on server-side configuration), so we can't ensure that
2274 * everything is safe on disk. Returning an error doesn't work because
2275 * that would break guests even if the server operates in writethrough
2278 * Let's hope the user knows what he's doing.
2286 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2287 * in the case of cache=unsafe, so there are no useless flushes.
2290 return bdrv_co_flush(bs
->file
);
2293 int bdrv_flush(BlockDriverState
*bs
)
2301 if (qemu_in_coroutine()) {
2302 /* Fast-path if already in coroutine context */
2303 bdrv_flush_co_entry(&rwco
);
2305 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
2307 co
= qemu_coroutine_create(bdrv_flush_co_entry
);
2308 qemu_coroutine_enter(co
, &rwco
);
2309 while (rwco
.ret
== NOT_DONE
) {
2310 aio_poll(aio_context
, true);
2317 typedef struct DiscardCo
{
2318 BlockDriverState
*bs
;
2323 static void coroutine_fn
bdrv_discard_co_entry(void *opaque
)
2325 DiscardCo
*rwco
= opaque
;
2327 rwco
->ret
= bdrv_co_discard(rwco
->bs
, rwco
->sector_num
, rwco
->nb_sectors
);
2330 int coroutine_fn
bdrv_co_discard(BlockDriverState
*bs
, int64_t sector_num
,
2333 int max_discard
, ret
;
2339 ret
= bdrv_check_request(bs
, sector_num
, nb_sectors
);
2342 } else if (bs
->read_only
) {
2346 bdrv_reset_dirty(bs
, sector_num
, nb_sectors
);
2348 /* Do nothing if disabled. */
2349 if (!(bs
->open_flags
& BDRV_O_UNMAP
)) {
2353 if (!bs
->drv
->bdrv_co_discard
&& !bs
->drv
->bdrv_aio_discard
) {
2357 max_discard
= MIN_NON_ZERO(bs
->bl
.max_discard
, BDRV_REQUEST_MAX_SECTORS
);
2358 while (nb_sectors
> 0) {
2360 int num
= nb_sectors
;
2363 if (bs
->bl
.discard_alignment
&&
2364 num
>= bs
->bl
.discard_alignment
&&
2365 sector_num
% bs
->bl
.discard_alignment
) {
2366 if (num
> bs
->bl
.discard_alignment
) {
2367 num
= bs
->bl
.discard_alignment
;
2369 num
-= sector_num
% bs
->bl
.discard_alignment
;
2372 /* limit request size */
2373 if (num
> max_discard
) {
2377 if (bs
->drv
->bdrv_co_discard
) {
2378 ret
= bs
->drv
->bdrv_co_discard(bs
, sector_num
, num
);
2381 CoroutineIOCompletion co
= {
2382 .coroutine
= qemu_coroutine_self(),
2385 acb
= bs
->drv
->bdrv_aio_discard(bs
, sector_num
, nb_sectors
,
2386 bdrv_co_io_em_complete
, &co
);
2390 qemu_coroutine_yield();
2394 if (ret
&& ret
!= -ENOTSUP
) {
2404 int bdrv_discard(BlockDriverState
*bs
, int64_t sector_num
, int nb_sectors
)
2409 .sector_num
= sector_num
,
2410 .nb_sectors
= nb_sectors
,
2414 if (qemu_in_coroutine()) {
2415 /* Fast-path if already in coroutine context */
2416 bdrv_discard_co_entry(&rwco
);
2418 AioContext
*aio_context
= bdrv_get_aio_context(bs
);
2420 co
= qemu_coroutine_create(bdrv_discard_co_entry
);
2421 qemu_coroutine_enter(co
, &rwco
);
2422 while (rwco
.ret
== NOT_DONE
) {
2423 aio_poll(aio_context
, true);
2430 /* needed for generic scsi interface */
2432 int bdrv_ioctl(BlockDriverState
*bs
, unsigned long int req
, void *buf
)
2434 BlockDriver
*drv
= bs
->drv
;
2436 if (drv
&& drv
->bdrv_ioctl
)
2437 return drv
->bdrv_ioctl(bs
, req
, buf
);
2441 BlockAIOCB
*bdrv_aio_ioctl(BlockDriverState
*bs
,
2442 unsigned long int req
, void *buf
,
2443 BlockCompletionFunc
*cb
, void *opaque
)
2445 BlockDriver
*drv
= bs
->drv
;
2447 if (drv
&& drv
->bdrv_aio_ioctl
)
2448 return drv
->bdrv_aio_ioctl(bs
, req
, buf
, cb
, opaque
);
2452 void *qemu_blockalign(BlockDriverState
*bs
, size_t size
)
2454 return qemu_memalign(bdrv_opt_mem_align(bs
), size
);
2457 void *qemu_blockalign0(BlockDriverState
*bs
, size_t size
)
2459 return memset(qemu_blockalign(bs
, size
), 0, size
);
2462 void *qemu_try_blockalign(BlockDriverState
*bs
, size_t size
)
2464 size_t align
= bdrv_opt_mem_align(bs
);
2466 /* Ensure that NULL is never returned on success */
2472 return qemu_try_memalign(align
, size
);
2475 void *qemu_try_blockalign0(BlockDriverState
*bs
, size_t size
)
2477 void *mem
= qemu_try_blockalign(bs
, size
);
2480 memset(mem
, 0, size
);
2487 * Check if all memory in this vector is sector aligned.
2489 bool bdrv_qiov_is_aligned(BlockDriverState
*bs
, QEMUIOVector
*qiov
)
2492 size_t alignment
= bdrv_opt_mem_align(bs
);
2494 for (i
= 0; i
< qiov
->niov
; i
++) {
2495 if ((uintptr_t) qiov
->iov
[i
].iov_base
% alignment
) {
2498 if (qiov
->iov
[i
].iov_len
% alignment
) {
2506 void bdrv_add_before_write_notifier(BlockDriverState
*bs
,
2507 NotifierWithReturn
*notifier
)
2509 notifier_with_return_list_add(&bs
->before_write_notifiers
, notifier
);
2512 void bdrv_io_plug(BlockDriverState
*bs
)
2514 BlockDriver
*drv
= bs
->drv
;
2515 if (drv
&& drv
->bdrv_io_plug
) {
2516 drv
->bdrv_io_plug(bs
);
2517 } else if (bs
->file
) {
2518 bdrv_io_plug(bs
->file
);
2522 void bdrv_io_unplug(BlockDriverState
*bs
)
2524 BlockDriver
*drv
= bs
->drv
;
2525 if (drv
&& drv
->bdrv_io_unplug
) {
2526 drv
->bdrv_io_unplug(bs
);
2527 } else if (bs
->file
) {
2528 bdrv_io_unplug(bs
->file
);
2532 void bdrv_flush_io_queue(BlockDriverState
*bs
)
2534 BlockDriver
*drv
= bs
->drv
;
2535 if (drv
&& drv
->bdrv_flush_io_queue
) {
2536 drv
->bdrv_flush_io_queue(bs
);
2537 } else if (bs
->file
) {
2538 bdrv_flush_io_queue(bs
->file
);