/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
        bs->bl.max_iov = bs->file->bs->bl.max_iov;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
        bs->bl.max_iov =
            MIN(bs->bl.max_iov,
                bs->backing->bs->bl.max_iov);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
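/*
 * For example, two independent users may nest the calls safely because the
 * flag is a counter rather than a boolean:
 *
 *     bdrv_enable_copy_on_read(bs);     user A: copy_on_read == 1
 *     bdrv_enable_copy_on_read(bs);     user B: copy_on_read == 2
 *     bdrv_disable_copy_on_read(bs);    user A done: still enabled
 *     bdrv_disable_copy_on_read(bs);    user B done: disabled again
 */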
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
} BdrvCoDrainData;
static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);

    qemu_coroutine_enter(co, NULL);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
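/*
 * Worked example (illustrative numbers): serialising a request with
 * offset 1536 and bytes 1024 at align 4096 widens the overlap region to
 * the containing aligned window:
 *
 *     overlap_offset = 1536 & ~4095             = 0
 *     overlap_bytes  = ROUND_UP(2560, 4096) - 0 = 4096
 */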
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
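/*
 * Worked example (illustrative numbers): with a 64k cluster size, i.e.
 * 128 sectors, the range [130, 130 + 10) rounds out to a whole cluster:
 *
 *     c                   = 65536 / 512                        = 128
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)          = 128
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128
 */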
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;
static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}
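/*
 * This is the generic "run a coroutine from synchronous code" pattern used
 * throughout this file: a caller outside coroutine context blocks in
 * aio_poll() until the coroutine entry function replaces the NOT_DONE
 * sentinel in rwco.ret, while a caller that already runs in a coroutine
 * simply invokes the entry function directly.
 */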
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(bs, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
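/*
 * Usage sketch (illustrative only; the 'header' variable is assumed): a
 * caller that must not let later writes overtake a metadata update could do:
 *
 *     ret = bdrv_pwrite_sync(bs, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */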
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                             cluster_nb_sectors * BDRV_SECTOR_SIZE,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if ((drv->bdrv_co_write_zeroes || drv->bdrv_co_pwrite_zeroes) &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_pwrite_zeroes(bs,
                                       cluster_sector_num * BDRV_SECTOR_SIZE,
                                       cluster_nb_sectors * BDRV_SECTOR_SIZE,
                                       0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                                  cluster_nb_sectors * BDRV_SECTOR_SIZE,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
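/*
 * In summary, the copy-on-read path above is: round the request out to
 * cluster boundaries, read the whole cluster into a private bounce buffer,
 * write it back (as zeroes where possible), and only then copy the
 * originally requested sub-range into the caller's qiov.
 */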
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = bdrv_driver_preadv(bs, offset,
                                     max_nb_sectors * BDRV_SECTOR_SIZE,
                                     &local_qiov, 0);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment ?: 1,
                        bs->request_alignment);

    assert(is_power_of_2(alignment));
    head = offset & (alignment - 1);
    tail = (offset + count) & (alignment - 1);
    max_write_zeroes &= ~(alignment - 1);

    while (count > 0 && !ret) {
        int num = count;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. */
            num = MIN(count, alignment - head);
            head = 0;
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else if (drv->bdrv_co_write_zeroes) {
            assert(offset % BDRV_SECTOR_SIZE == 0);
            assert(count % BDRV_SECTOR_SIZE == 0);
            ret = drv->bdrv_co_write_zeroes(bs, offset >> BDRV_SECTOR_BITS,
                                            num >> BDRV_SECTOR_BITS,
                                            flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_xfer_len << BDRV_SECTOR_BITS);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * all future requests.
             */
            if (num < max_xfer_len << BDRV_SECTOR_BITS) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        count -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}
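/*
 * Worked example (illustrative numbers): with alignment 4096, a request
 * with offset = 1000 and count = 66000 is carved up by the loop above as:
 *
 *     head chunk : bytes  1000 .. 4096   (num = 3096, reaches alignment)
 *     bulk chunk : bytes  4096 .. 65536  (aligned, capped by max_write_zeroes)
 *     tail chunk : bytes 65536 .. 67000  (short unaligned remainder)
 *
 * so a driver sees at most one unaligned chunk at each end of the request.
 */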
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) &&
        (drv->bdrv_co_pwrite_zeroes || drv->bdrv_co_write_zeroes) &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, sector_num << BDRV_SECTOR_BITS,
                                       nb_sectors << BDRV_SECTOR_BITS, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}
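/*
 * Worked example (illustrative numbers): with align = 512, a write of 700
 * bytes at offset 300 becomes a read-modify-write of [0, 1024):
 *
 *     head : read [0, 512),    keep bytes [0, 300)     from disk
 *     body : caller data for   [300, 1000)
 *     tail : read [512, 1024), keep bytes [1000, 1024) from disk
 *
 * The serialising machinery prevents concurrent writers from racing with
 * the two RMW reads.
 */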
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_pwrite_zeroes(BlockDriverState *bs,
                                       int64_t offset, int count,
                                       BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(bs, offset, count, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(bs, offset, count, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum, file);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
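/*
 * Example interpretation (illustrative): a return value of
 * BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_OFFSET_VALID means
 * the sectors carry data in this layer and the offset bits of the return
 * value locate them in *file; BDRV_BLOCK_ZERO without BDRV_BLOCK_DATA
 * means the range reads as zeroes without being backed by stored data.
 */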
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}
/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_load_vmstate) {
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    }
    if (bs->file) {
        return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
    }

    return -ENOTSUP;
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t sector;
            int nb_sectors;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
    QEMUBH* bh;
} BlockAIOCBCoroutine;
static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}
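/*
 * need_bh guarantees that the completion callback never runs before the
 * caller has received the ACB: if the request finished before the caller
 * regained control, completion is deferred to the bottom half scheduled
 * here rather than being invoked directly from bdrv_co_complete().
 */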
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}
/**************************************************************/
/* Coroutine block device emulation */

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    tracked_request_end(&req);
    return ret;
}
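/*
 * To summarise the ordering above: a flush is either a single bdrv_co_flush
 * driver call, or bdrv_co_flush_to_os followed by
 * bdrv_co_flush_to_disk/bdrv_aio_flush, and in all cases it recurses into
 * bs->file so the protocol layer is flushed as well.
 */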
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    BdrvTrackedRequest req;
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors,
                          BDRV_TRACKED_DISCARD);
    bdrv_set_dirty(bs, sector_num, nb_sectors);

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    ret = 0;
out:
    tracked_request_end(&req);
    return ret;
}
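/*
 * Worked example (illustrative numbers): with bs->bl.discard_alignment
 * equal to 8, a discard starting at sector_num = 5 first issues a chunk of
 * num = 8 - 5 = 3 sectors, so every subsequent chunk starts on an 8-sector
 * boundary.
 */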
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        co.ret = -ENOTSUP;
        goto out;
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}

typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;
    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}
/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);

        qemu_coroutine_enter(co, &data);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}
static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}