/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

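/* Illustrative pairing (not a call sequence taken from this file), assuming
 * two independent users of the feature on the same BlockDriverState:
 *
 *     bdrv_enable_copy_on_read(bs);    // user A, refcount is now 1
 *     bdrv_enable_copy_on_read(bs);    // user B, refcount is now 2
 *     bdrv_disable_copy_on_read(bs);   // A done, copy-on-read stays enabled
 *     bdrv_disable_copy_on_read(bs);   // B done, copy-on-read is now disabled
 */
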
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static bool bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child, *tmp;
    bool waited;

    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }

    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
        BlockDriverState *bs = child->bs;
        bool in_main_loop =
            qemu_get_current_aio_context() == qemu_get_aio_context();
        assert(bs->refcnt > 0);
        if (in_main_loop) {
            /* In case the recursive bdrv_drain_recurse processes a
             * block_job_defer_to_main_loop BH and modifies the graph,
             * let's hold a reference to bs until we are done.
             *
             * IOThread doesn't have such a BH, and it is not safe to call
             * bdrv_unref without BQL, so skip doing it there.
             */
            bdrv_ref(bs);
        }
        waited |= bdrv_drain_recurse(bs);
        if (in_main_loop) {
            bdrv_unref(bs);
        }
    }

    return waited;
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
} BdrvCoDrainData;

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    bdrv_dec_in_flight(bs);
    bdrv_drained_begin(bs);
    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
        return;
    }

    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_drain_recurse(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool waited = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    block_job_pause_all();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        aio_disable_external(aio_context);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (waited) {
        waited = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs);
                }
            }
            aio_context_release(aio_context);
        }
    }

    g_slist_free(aio_ctxs);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs;
    BdrvNextIterator it;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        aio_enable_external(aio_context);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }

    block_job_resume_all();
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

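/* Illustrative usage sketch (hypothetical caller, not from this file): a
 * caller that needs the whole graph quiescent while it manipulates it would
 * bracket the critical section explicitly instead of using bdrv_drain_all():
 *
 *     bdrv_drain_all_begin();
 *     ... reconfigure the BDS graph; no new jobs or BDSes may be created ...
 *     bdrv_drain_all_end();
 */
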
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

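/* Worked example (illustrative numbers, not from this file): with a reported
 * cluster_size of 64 KiB, a request of offset=69632, bytes=4096 is widened to
 * cluster_offset = QEMU_ALIGN_DOWN(69632, 65536) = 65536 and
 * cluster_bytes = QEMU_ALIGN_UP(69632 - 65536 + 4096, 65536) = 65536,
 * i.e. the single cluster [65536, 131072) containing the request.
 */
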
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

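/* Example (illustrative): for a request with overlap_offset=4096 and
 * overlap_bytes=4096 (covering [4096, 8192)), a query at offset=8192 does not
 * overlap because it starts exactly where the request ends, while a query at
 * offset=8191 with bytes=1 does, since neither interval ends before the other
 * begins.
 */
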
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

static void dummy_bh_cb(void *opaque)
{
}

void bdrv_wakeup(BlockDriverState *bs)
{
    /* The barrier (or an atomic op) is in the caller.  */
    if (atomic_read(&bs->wakeup)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = bytes,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* TODO: Simplify further once bdrv_is_allocated no longer
         * requires sector alignment */
        int64_t start = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE);
        int64_t end = QEMU_ALIGN_UP(offset + bytes, BDRV_SECTOR_SIZE);
        int64_t pnum;

        ret = bdrv_is_allocated(bs, start, end - start, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != end - start) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}

/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    MAX_WRITE_ZEROES_BOUNCE_BUFFER);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(child->perm & BLK_PERM_WRITE);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    atomic_inc(&bs->write_gen);
    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    stat64_max(&bs->wr_highest_offset, offset + bytes);

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = (align - (offset + bytes)) & (align - 1);

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is
 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    *file = NULL;
    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return BDRV_BLOCK_EOF;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (sector_num + nb_sectors == total_sectors) {
            ret |= BDRV_BLOCK_EOF;
        }
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
            *file = bs;
        }
        return ret;
    }

    bdrv_inc_in_flight(bs);
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        goto out;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID && *file);
        ret = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                       *pnum, pnum, file);
        goto out;
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (ret2 & BDRV_BLOCK_EOF &&
                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
                /*
                 * It is valid for the format block driver to read
                 * beyond the end of the underlying file's current
                 * size; such areas read as zero.
                 */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

out:
    bdrv_dec_in_flight(bs);
    if (ret >= 0 && sector_num + *pnum == total_sectors) {
        ret |= BDRV_BLOCK_EOF;
    }
    return ret;
}

static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;
    bool first = true;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0) {
            break;
        }
        if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
            /*
             * Reading beyond the end of the file continues to read
             * zeroes, but we can only widen the result to the
             * unallocated length we learned from an earlier
             * iteration.
             */
            *pnum = nb_sectors;
        }
        if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
        first = false;
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                   &data);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, !data.done);
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    BlockDriverState *file;
    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int64_t ret;
    int psectors;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE) && bytes < INT_MAX);
    ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &psectors,
                                &file);
    if (ret < 0) {
        return ret;
    }
    if (pnum) {
        *pnum = psectors * BDRV_SECTOR_SIZE;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if (a prefix of) the given range is allocated in any image
 * between BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * offset is allocated in any image of the chain.  Return false otherwise,
 * or negative errno on failure.
 *
 * 'pnum' is set to the number of bytes (including and immediately
 * following the specified offset) that are known to be in the same
 * allocated/unallocated state.  Note that a subsequent call starting
 * at 'offset + *pnum' may return the same allocation status (in other
 * words, the result is not necessarily the maximum possible range);
 * but 'pnum' will only be 0 when end of file is reached.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum)
{
    BlockDriverState *intermediate;
    int ret;
    int64_t n = bytes;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int64_t pnum_inter;
        int64_t size_inter;

        ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        size_inter = bdrv_getlength(intermediate);
        if (size_inter < 0) {
            return size_inter;
        }
        if (n > pnum_inter &&
            (intermediate == top || offset + pnum_inter < size_inter)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}

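/* Example (illustrative): for the chain base <- mid <- top, where a range is
 * written only in 'mid', bdrv_is_allocated_above(top, base, offset, bytes,
 * &pnum) first sees the range unallocated in 'top', descends to 'mid', finds
 * it allocated there and returns 1, with *pnum set to the number of
 * contiguous bytes that 'mid' reports as allocated from that offset.
 */
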
typedef struct BdrvVmstateCo {
    BlockDriverState   *bs;
    QEMUIOVector       *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;
    int ret = -ENOTSUP;

    bdrv_inc_in_flight(bs);

    if (!drv) {
        ret = -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        if (is_read) {
            ret = drv->bdrv_load_vmstate(bs, qiov, pos);
        } else {
            ret = drv->bdrv_save_vmstate(bs, qiov, pos);
        }
    } else if (bs->file) {
        ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    bdrv_dec_in_flight(bs);
    return ret;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
        return data.ret;
    }
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}

/**************************************************************/
/* async I/Os */

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
             * assert that we're not using an I/O thread.  Thread-safe
             * code should use bdrv_aio_cancel_async exclusively.
             */
            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;


static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int current_gen;
    int ret = 0;

    bdrv_inc_in_flight(bs);

    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        goto early_exit;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    current_gen = atomic_read(&bs->write_gen);

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req) {
        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
    }

    /* Flushes reach this point in nondecreasing current_gen order.  */
    bs->active_flush_req = true;
    qemu_co_mutex_unlock(&bs->reqs_lock);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    if (ret == 0) {
        bs->flushed_gen = current_gen;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    bs->active_flush_req = false;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);
    qemu_co_mutex_unlock(&bs->reqs_lock);

early_exit:
    bdrv_dec_in_flight(bs);
    return ret;
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
    }

    return flush_co.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int bytes;
    int ret;
} DiscardCo;
static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
}

int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int bytes)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, tail, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices will just silently ignore
     * unaligned requests (by returning -ENOTSUP), so we must fragment
     * the request accordingly.  */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    atomic_inc(&bs->write_gen);
    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
                   req.bytes >> BDRV_SECTOR_BITS);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}