block: Fix bdrv_co_truncate overlap check
[qemu/ar7.git] / block / io.c
/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
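
/* For reference: with the usual 512-byte sectors (BDRV_SECTOR_BITS == 9) this
 * bounce buffer limit works out to 32768 * 512 bytes = 16 MiB. */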
static AioWait drain_all_aio_wait;

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                               bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}
void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                             bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->role->drained_poll) {
        return c->role->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    if (c->role->drained_begin) {
        c->role->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
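
/* Note: the transfer limits use MIN_NON_ZERO() because a value of 0 means
 * "no limit" here; e.g. merging max_transfer == 0 with a child's 65536 keeps
 * 65536, while two non-zero limits keep the smaller one. Alignments, by
 * contrast, are simply maximised. */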
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv) ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
} BdrvCoDrainData;
static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done before reading bs->wakeup. */
    atomic_mb_set(&data->done, true);
    bdrv_dec_in_flight(bs);

    if (data->begin) {
        g_free(data);
    }
}
/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin
    };

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);

    if (!begin) {
        BDRV_POLL_WHILE(bs, !data->done);
        g_free(data);
    }
}
242 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
243 bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
244 BdrvChild *ignore_parent, bool ignore_bds_parents)
246 BdrvChild *child, *next;
248 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
249 return true;
252 if (atomic_read(&bs->in_flight)) {
253 return true;
256 if (recursive) {
257 assert(!ignore_bds_parents);
258 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
259 if (bdrv_drain_poll(child->bs, recursive, child, false)) {
260 return true;
265 return false;
268 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
269 BdrvChild *ignore_parent)
271 /* Execute pending BHs first and check everything else only after the BHs
272 * have executed. */
273 while (aio_poll(bs->aio_context, false));
275 return bdrv_drain_poll(bs, recursive, ignore_parent, false);
278 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
279 BdrvChild *parent, bool ignore_bds_parents,
280 bool poll);
281 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
282 BdrvChild *parent, bool ignore_bds_parents);
284 static void bdrv_co_drain_bh_cb(void *opaque)
286 BdrvCoDrainData *data = opaque;
287 Coroutine *co = data->co;
288 BlockDriverState *bs = data->bs;
290 if (bs) {
291 bdrv_dec_in_flight(bs);
292 if (data->begin) {
293 bdrv_do_drained_begin(bs, data->recursive, data->parent,
294 data->ignore_bds_parents, data->poll);
295 } else {
296 bdrv_do_drained_end(bs, data->recursive, data->parent,
297 data->ignore_bds_parents);
299 } else {
300 assert(data->begin);
301 bdrv_drain_all_begin();
304 data->done = true;
305 aio_co_wake(co);
308 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
309 bool begin, bool recursive,
310 BdrvChild *parent,
311 bool ignore_bds_parents,
312 bool poll)
314 BdrvCoDrainData data;
316 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
317 * other coroutines run if they were queued by aio_co_enter(). */
319 assert(qemu_in_coroutine());
320 data = (BdrvCoDrainData) {
321 .co = qemu_coroutine_self(),
322 .bs = bs,
323 .done = false,
324 .begin = begin,
325 .recursive = recursive,
326 .parent = parent,
327 .ignore_bds_parents = ignore_bds_parents,
328 .poll = poll,
330 if (bs) {
331 bdrv_inc_in_flight(bs);
333 aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
334 bdrv_co_drain_bh_cb, &data);
336 qemu_coroutine_yield();
337 /* If we are resumed from some other event (such as an aio completion or a
338 * timer callback), it is a bug in the caller that should be fixed. */
339 assert(data.done);
342 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
343 BdrvChild *parent, bool ignore_bds_parents)
345 assert(!qemu_in_coroutine());
347 /* Stop things in parent-to-child order */
348 if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
349 aio_disable_external(bdrv_get_aio_context(bs));
352 bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
353 bdrv_drain_invoke(bs, true);
356 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
357 BdrvChild *parent, bool ignore_bds_parents,
358 bool poll)
360 BdrvChild *child, *next;
362 if (qemu_in_coroutine()) {
363 bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
364 poll);
365 return;
368 bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);
370 if (recursive) {
371 assert(!ignore_bds_parents);
372 bs->recursive_quiesce_counter++;
373 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
374 bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
375 false);
380 * Wait for drained requests to finish.
382 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
383 * call is needed so things in this AioContext can make progress even
384 * though we don't return to the main AioContext loop - this automatically
385 * includes other nodes in the same AioContext and therefore all child
386 * nodes.
388 if (poll) {
389 assert(!ignore_bds_parents);
390 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
394 void bdrv_drained_begin(BlockDriverState *bs)
396 bdrv_do_drained_begin(bs, false, NULL, false, true);
399 void bdrv_subtree_drained_begin(BlockDriverState *bs)
401 bdrv_do_drained_begin(bs, true, NULL, false, true);
404 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
405 BdrvChild *parent, bool ignore_bds_parents)
407 BdrvChild *child, *next;
408 int old_quiesce_counter;
410 if (qemu_in_coroutine()) {
411 bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
412 false);
413 return;
415 assert(bs->quiesce_counter > 0);
416 old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
418 /* Re-enable things in child-to-parent order */
419 bdrv_drain_invoke(bs, false);
420 bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
421 if (old_quiesce_counter == 1) {
422 aio_enable_external(bdrv_get_aio_context(bs));
425 if (recursive) {
426 assert(!ignore_bds_parents);
427 bs->recursive_quiesce_counter--;
428 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
429 bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents);
434 void bdrv_drained_end(BlockDriverState *bs)
436 bdrv_do_drained_end(bs, false, NULL, false);
439 void bdrv_subtree_drained_end(BlockDriverState *bs)
441 bdrv_do_drained_end(bs, true, NULL, false);
444 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
446 int i;
448 for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
449 bdrv_do_drained_begin(child->bs, true, child, false, true);
453 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
455 int i;
457 for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
458 bdrv_do_drained_end(child->bs, true, child, false);
463 * Wait for pending requests to complete on a single BlockDriverState subtree,
464 * and suspend block driver's internal I/O until next request arrives.
466 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
467 * AioContext.
469 void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
471 assert(qemu_in_coroutine());
472 bdrv_drained_begin(bs);
473 bdrv_drained_end(bs);
476 void bdrv_drain(BlockDriverState *bs)
478 bdrv_drained_begin(bs);
479 bdrv_drained_end(bs);
482 static void bdrv_drain_assert_idle(BlockDriverState *bs)
484 BdrvChild *child, *next;
486 assert(atomic_read(&bs->in_flight) == 0);
487 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
488 bdrv_drain_assert_idle(child->bs);
492 unsigned int bdrv_drain_all_count = 0;
494 static bool bdrv_drain_all_poll(void)
496 BlockDriverState *bs = NULL;
497 bool result = false;
499 /* Execute pending BHs first (may modify the graph) and check everything
500 * else only after the BHs have executed. */
501 while (aio_poll(qemu_get_aio_context(), false));
503 /* bdrv_drain_poll() can't make changes to the graph and we are holding the
504 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
505 while ((bs = bdrv_next_all_states(bs))) {
506 AioContext *aio_context = bdrv_get_aio_context(bs);
507 aio_context_acquire(aio_context);
508 result |= bdrv_drain_poll(bs, false, NULL, true);
509 aio_context_release(aio_context);
512 return result;
516 * Wait for pending requests to complete across all BlockDriverStates
518 * This function does not flush data to disk, use bdrv_flush_all() for that
519 * after calling this function.
521 * This pauses all block jobs and disables external clients. It must
522 * be paired with bdrv_drain_all_end().
524 * NOTE: no new block jobs or BlockDriverStates can be created between
525 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
527 void bdrv_drain_all_begin(void)
529 BlockDriverState *bs = NULL;
531 if (qemu_in_coroutine()) {
532 bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
533 return;
536 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
537 * loop AioContext, so make sure we're in the main context. */
538 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
539 assert(bdrv_drain_all_count < INT_MAX);
540 bdrv_drain_all_count++;
542 /* Quiesce all nodes, without polling in-flight requests yet. The graph
543 * cannot change during this loop. */
544 while ((bs = bdrv_next_all_states(bs))) {
545 AioContext *aio_context = bdrv_get_aio_context(bs);
547 aio_context_acquire(aio_context);
548 bdrv_do_drained_begin(bs, false, NULL, true, false);
549 aio_context_release(aio_context);
552 /* Now poll the in-flight requests */
553 AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
555 while ((bs = bdrv_next_all_states(bs))) {
556 bdrv_drain_assert_idle(bs);
560 void bdrv_drain_all_end(void)
562 BlockDriverState *bs = NULL;
564 while ((bs = bdrv_next_all_states(bs))) {
565 AioContext *aio_context = bdrv_get_aio_context(bs);
567 aio_context_acquire(aio_context);
568 bdrv_do_drained_end(bs, false, NULL, true);
569 aio_context_release(aio_context);
572 assert(bdrv_drain_all_count > 0);
573 bdrv_drain_all_count--;
576 void bdrv_drain_all(void)
578 bdrv_drain_all_begin();
579 bdrv_drain_all_end();
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
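
/* Worked example: a request with offset 5 and bytes 10, serialised at an
 * alignment of 8, gets overlap_offset = 0 and overlap_bytes = 16; repeated
 * calls can only widen this window (MIN of offsets, MAX of sizes). */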
static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
{
    /*
     * If the request is serialising, overlap_offset and overlap_bytes are set,
     * so we can check if the request is aligned. Otherwise, don't care and
     * return false.
     */

    return req->serialising && (req->offset == req->overlap_offset) &&
           (req->bytes == req->overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
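
/* Worked example: with a 64 KiB cluster size, offset 70000 and bytes 1000
 * round to *cluster_offset = 65536 and *cluster_bytes = 65536, i.e. the
 * smallest cluster-aligned region that covers the original request. */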
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
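
/* Worked example: a request whose serialised window is [4096, 8192) does not
 * overlap a range starting at 8192 or one ending at or before offset 4096;
 * any byte shared with the window counts as an overlap. */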
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick(bdrv_get_aio_wait(bs));
    aio_wait_kick(&drain_all_aio_wait);
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}
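
/* Note: BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS caps a single request at
 * just under 2 GiB (roughly INT_MAX rounded down to sector alignment). */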
783 typedef struct RwCo {
784 BdrvChild *child;
785 int64_t offset;
786 QEMUIOVector *qiov;
787 bool is_write;
788 int ret;
789 BdrvRequestFlags flags;
790 } RwCo;
792 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
794 RwCo *rwco = opaque;
796 if (!rwco->is_write) {
797 rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
798 rwco->qiov->size, rwco->qiov,
799 rwco->flags);
800 } else {
801 rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
802 rwco->qiov->size, rwco->qiov,
803 rwco->flags);
808 * Process a vectored synchronous request using coroutines
810 static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
811 QEMUIOVector *qiov, bool is_write,
812 BdrvRequestFlags flags)
814 Coroutine *co;
815 RwCo rwco = {
816 .child = child,
817 .offset = offset,
818 .qiov = qiov,
819 .is_write = is_write,
820 .ret = NOT_DONE,
821 .flags = flags,
824 if (qemu_in_coroutine()) {
825 /* Fast-path if already in coroutine context */
826 bdrv_rw_co_entry(&rwco);
827 } else {
828 co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
829 bdrv_coroutine_enter(child->bs, co);
830 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
832 return rwco.ret;
836 * Process a synchronous request using coroutines
838 static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
839 int nb_sectors, bool is_write, BdrvRequestFlags flags)
841 QEMUIOVector qiov;
842 struct iovec iov = {
843 .iov_base = (void *)buf,
844 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
847 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
848 return -EINVAL;
851 qemu_iovec_init_external(&qiov, &iov, 1);
852 return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
853 &qiov, is_write, flags);
856 /* return < 0 if error. See bdrv_write() for the return codes */
857 int bdrv_read(BdrvChild *child, int64_t sector_num,
858 uint8_t *buf, int nb_sectors)
860 return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
863 /* Return < 0 if error. Important errors are:
864 -EIO generic I/O error (may happen for all errors)
865 -ENOMEDIUM No media inserted.
866 -EINVAL Invalid sector number or nb_sectors
867 -EACCES Trying to write a read-only device
869 int bdrv_write(BdrvChild *child, int64_t sector_num,
870 const uint8_t *buf, int nb_sectors)
872 return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
875 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
876 int bytes, BdrvRequestFlags flags)
878 QEMUIOVector qiov;
879 struct iovec iov = {
880 .iov_base = NULL,
881 .iov_len = bytes,
884 qemu_iovec_init_external(&qiov, &iov, 1);
885 return bdrv_prwv_co(child, offset, &qiov, true,
886 BDRV_REQ_ZERO_WRITE | flags);
890 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
891 * The operation is sped up by checking the block status and only writing
892 * zeroes to the device if they currently do not return zeroes. Optional
893 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
894 * BDRV_REQ_FUA).
896 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
898 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
900 int ret;
901 int64_t target_size, bytes, offset = 0;
902 BlockDriverState *bs = child->bs;
904 target_size = bdrv_getlength(bs);
905 if (target_size < 0) {
906 return target_size;
909 for (;;) {
910 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
911 if (bytes <= 0) {
912 return 0;
914 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
915 if (ret < 0) {
916 error_report("error getting block status at offset %" PRId64 ": %s",
917 offset, strerror(-ret));
918 return ret;
920 if (ret & BDRV_BLOCK_ZERO) {
921 offset += bytes;
922 continue;
924 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
925 if (ret < 0) {
926 error_report("error writing zeroes at offset %" PRId64 ": %s",
927 offset, strerror(-ret));
928 return ret;
930 offset += bytes;
934 int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
936 int ret;
938 ret = bdrv_prwv_co(child, offset, qiov, false, 0);
939 if (ret < 0) {
940 return ret;
943 return qiov->size;
946 int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
948 QEMUIOVector qiov;
949 struct iovec iov = {
950 .iov_base = (void *)buf,
951 .iov_len = bytes,
954 if (bytes < 0) {
955 return -EINVAL;
958 qemu_iovec_init_external(&qiov, &iov, 1);
959 return bdrv_preadv(child, offset, &qiov);
962 int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
964 int ret;
966 ret = bdrv_prwv_co(child, offset, qiov, true, 0);
967 if (ret < 0) {
968 return ret;
971 return qiov->size;
974 int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
976 QEMUIOVector qiov;
977 struct iovec iov = {
978 .iov_base = (void *) buf,
979 .iov_len = bytes,
982 if (bytes < 0) {
983 return -EINVAL;
986 qemu_iovec_init_external(&qiov, &iov, 1);
987 return bdrv_pwritev(child, offset, &qiov);
991 * Writes to the file and ensures that no writes are reordered across this
992 * request (acts as a barrier)
994 * Returns 0 on success, -errno in error cases.
996 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
997 const void *buf, int count)
999 int ret;
1001 ret = bdrv_pwrite(child, offset, buf, count);
1002 if (ret < 0) {
1003 return ret;
1006 ret = bdrv_flush(child->bs);
1007 if (ret < 0) {
1008 return ret;
1011 return 0;
1014 typedef struct CoroutineIOCompletion {
1015 Coroutine *coroutine;
1016 int ret;
1017 } CoroutineIOCompletion;
1019 static void bdrv_co_io_em_complete(void *opaque, int ret)
1021 CoroutineIOCompletion *co = opaque;
1023 co->ret = ret;
1024 aio_co_wake(co->coroutine);
1027 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1028 uint64_t offset, uint64_t bytes,
1029 QEMUIOVector *qiov, int flags)
1031 BlockDriver *drv = bs->drv;
1032 int64_t sector_num;
1033 unsigned int nb_sectors;
1035 assert(!(flags & ~BDRV_REQ_MASK));
1037 if (!drv) {
1038 return -ENOMEDIUM;
1041 if (drv->bdrv_co_preadv) {
1042 return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1045 if (drv->bdrv_aio_preadv) {
1046 BlockAIOCB *acb;
1047 CoroutineIOCompletion co = {
1048 .coroutine = qemu_coroutine_self(),
1051 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1052 bdrv_co_io_em_complete, &co);
1053 if (acb == NULL) {
1054 return -EIO;
1055 } else {
1056 qemu_coroutine_yield();
1057 return co.ret;
1061 sector_num = offset >> BDRV_SECTOR_BITS;
1062 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1064 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1065 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1066 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
1067 assert(drv->bdrv_co_readv);
1069 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1072 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
1073 uint64_t offset, uint64_t bytes,
1074 QEMUIOVector *qiov, int flags)
1076 BlockDriver *drv = bs->drv;
1077 int64_t sector_num;
1078 unsigned int nb_sectors;
1079 int ret;
1081 assert(!(flags & ~BDRV_REQ_MASK));
1083 if (!drv) {
1084 return -ENOMEDIUM;
1087 if (drv->bdrv_co_pwritev) {
1088 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1089 flags & bs->supported_write_flags);
1090 flags &= ~bs->supported_write_flags;
1091 goto emulate_flags;
1094 if (drv->bdrv_aio_pwritev) {
1095 BlockAIOCB *acb;
1096 CoroutineIOCompletion co = {
1097 .coroutine = qemu_coroutine_self(),
1100 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1101 flags & bs->supported_write_flags,
1102 bdrv_co_io_em_complete, &co);
1103 flags &= ~bs->supported_write_flags;
1104 if (acb == NULL) {
1105 ret = -EIO;
1106 } else {
1107 qemu_coroutine_yield();
1108 ret = co.ret;
1110 goto emulate_flags;
1113 sector_num = offset >> BDRV_SECTOR_BITS;
1114 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1116 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1117 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1118 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
1120 assert(drv->bdrv_co_writev);
1121 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1122 flags & bs->supported_write_flags);
1123 flags &= ~bs->supported_write_flags;
1125 emulate_flags:
1126 if (ret == 0 && (flags & BDRV_REQ_FUA)) {
1127 ret = bdrv_co_flush(bs);
1130 return ret;
1133 static int coroutine_fn
1134 bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
1135 uint64_t bytes, QEMUIOVector *qiov)
1137 BlockDriver *drv = bs->drv;
1139 if (!drv) {
1140 return -ENOMEDIUM;
1143 if (!drv->bdrv_co_pwritev_compressed) {
1144 return -ENOTSUP;
1147 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1150 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
1151 int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
1153 BlockDriverState *bs = child->bs;
1155 /* Perform I/O through a temporary buffer so that users who scribble over
1156 * their read buffer while the operation is in progress do not end up
1157 * modifying the image file. This is critical for zero-copy guest I/O
1158 * where anything might happen inside guest memory.
1160 void *bounce_buffer;
1162 BlockDriver *drv = bs->drv;
1163 struct iovec iov;
1164 QEMUIOVector local_qiov;
1165 int64_t cluster_offset;
1166 int64_t cluster_bytes;
1167 size_t skip_bytes;
1168 int ret;
1169 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1170 BDRV_REQUEST_MAX_BYTES);
1171 unsigned int progress = 0;
1173 if (!drv) {
1174 return -ENOMEDIUM;
1177 /* FIXME We cannot require callers to have write permissions when all they
1178 * are doing is a read request. If we did things right, write permissions
1179 * would be obtained anyway, but internally by the copy-on-read code. As
1180 * long as it is implemented here rather than in a separate filter driver,
1181 * the copy-on-read code doesn't have its own BdrvChild, however, for which
1182 * it could request permissions. Therefore we have to bypass the permission
1183 * system for the moment. */
1184 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1186 /* Cover entire cluster so no additional backing file I/O is required when
1187 * allocating cluster in the image file. Note that this value may exceed
1188 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1189 * is one reason we loop rather than doing it all at once.
1191 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1192 skip_bytes = offset - cluster_offset;
1194 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1195 cluster_offset, cluster_bytes);
1197 bounce_buffer = qemu_try_blockalign(bs,
1198 MIN(MIN(max_transfer, cluster_bytes),
1199 MAX_BOUNCE_BUFFER));
1200 if (bounce_buffer == NULL) {
1201 ret = -ENOMEM;
1202 goto err;
1205 while (cluster_bytes) {
1206 int64_t pnum;
1208 ret = bdrv_is_allocated(bs, cluster_offset,
1209 MIN(cluster_bytes, max_transfer), &pnum);
1210 if (ret < 0) {
1211 /* Safe to treat errors in querying allocation as if
1212 * unallocated; we'll probably fail again soon on the
1213 * read, but at least that will set a decent errno.
1215 pnum = MIN(cluster_bytes, max_transfer);
1218 /* Stop at EOF if the image ends in the middle of the cluster */
1219 if (ret == 0 && pnum == 0) {
1220 assert(progress >= bytes);
1221 break;
1224 assert(skip_bytes < pnum);
1226 if (ret <= 0) {
1227 /* Must copy-on-read; use the bounce buffer */
1228 iov.iov_base = bounce_buffer;
1229 iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1230 qemu_iovec_init_external(&local_qiov, &iov, 1);
1232 ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1233 &local_qiov, 0);
1234 if (ret < 0) {
1235 goto err;
1238 bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1239 if (drv->bdrv_co_pwrite_zeroes &&
1240 buffer_is_zero(bounce_buffer, pnum)) {
1241 /* FIXME: Should we (perhaps conditionally) be setting
1242 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1243 * that still correctly reads as zero? */
1244 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
1245 BDRV_REQ_WRITE_UNCHANGED);
1246 } else {
1247 /* This does not change the data on the disk, it is not
1248 * necessary to flush even in cache=writethrough mode.
1250 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1251 &local_qiov,
1252 BDRV_REQ_WRITE_UNCHANGED);
1255 if (ret < 0) {
1256 /* It might be okay to ignore write errors for guest
1257 * requests. If this is a deliberate copy-on-read
1258 * then we don't want to ignore the error. Simply
1259 * report it in all cases.
1261 goto err;
1264 qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
1265 pnum - skip_bytes);
1266 } else {
1267 /* Read directly into the destination */
1268 qemu_iovec_init(&local_qiov, qiov->niov);
1269 qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
1270 ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
1271 &local_qiov, 0);
1272 qemu_iovec_destroy(&local_qiov);
1273 if (ret < 0) {
1274 goto err;
1278 cluster_offset += pnum;
1279 cluster_bytes -= pnum;
1280 progress += pnum - skip_bytes;
1281 skip_bytes = 0;
1283 ret = 0;
1285 err:
1286 qemu_vfree(bounce_buffer);
1287 return ret;
1291 * Forwards an already correctly aligned request to the BlockDriver. This
1292 * handles copy on read, zeroing after EOF, and fragmentation of large
1293 * reads; any other features must be implemented by the caller.
1295 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
1296 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1297 int64_t align, QEMUIOVector *qiov, int flags)
1299 BlockDriverState *bs = child->bs;
1300 int64_t total_bytes, max_bytes;
1301 int ret = 0;
1302 uint64_t bytes_remaining = bytes;
1303 int max_transfer;
1305 assert(is_power_of_2(align));
1306 assert((offset & (align - 1)) == 0);
1307 assert((bytes & (align - 1)) == 0);
1308 assert(!qiov || bytes == qiov->size);
1309 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1310 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1311 align);
1313 /* TODO: We would need a per-BDS .supported_read_flags and
1314 * potential fallback support, if we ever implement any read flags
1315 * to pass through to drivers. For now, there aren't any
1316 * passthrough flags. */
1317 assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
1319 /* Handle Copy on Read and associated serialisation */
1320 if (flags & BDRV_REQ_COPY_ON_READ) {
1321 /* If we touch the same cluster it counts as an overlap. This
1322 * guarantees that allocating writes will be serialized and not race
1323 * with each other for the same cluster. For example, in copy-on-read
1324 * it ensures that the CoR read and write operations are atomic and
1325 * guest writes cannot interleave between them. */
1326 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1329 /* BDRV_REQ_SERIALISING is only for write operation */
1330 assert(!(flags & BDRV_REQ_SERIALISING));
1332 if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1333 wait_serialising_requests(req);
1336 if (flags & BDRV_REQ_COPY_ON_READ) {
1337 int64_t pnum;
1339 ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
1340 if (ret < 0) {
1341 goto out;
1344 if (!ret || pnum != bytes) {
1345 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
1346 goto out;
1350 /* Forward the request to the BlockDriver, possibly fragmenting it */
1351 total_bytes = bdrv_getlength(bs);
1352 if (total_bytes < 0) {
1353 ret = total_bytes;
1354 goto out;
1357 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1358 if (bytes <= max_bytes && bytes <= max_transfer) {
1359 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1360 goto out;
1363 while (bytes_remaining) {
1364 int num;
1366 if (max_bytes) {
1367 QEMUIOVector local_qiov;
1369 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1370 assert(num);
1371 qemu_iovec_init(&local_qiov, qiov->niov);
1372 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1374 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1375 num, &local_qiov, 0);
1376 max_bytes -= num;
1377 qemu_iovec_destroy(&local_qiov);
1378 } else {
1379 num = bytes_remaining;
1380 ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
1381 bytes_remaining);
1383 if (ret < 0) {
1384 goto out;
1386 bytes_remaining -= num;
1389 out:
1390 return ret < 0 ? ret : 0;
1394 * Handle a read request in coroutine context
1396 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1397 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1398 BdrvRequestFlags flags)
1400 BlockDriverState *bs = child->bs;
1401 BlockDriver *drv = bs->drv;
1402 BdrvTrackedRequest req;
1404 uint64_t align = bs->bl.request_alignment;
1405 uint8_t *head_buf = NULL;
1406 uint8_t *tail_buf = NULL;
1407 QEMUIOVector local_qiov;
1408 bool use_local_qiov = false;
1409 int ret;
1411 trace_bdrv_co_preadv(child->bs, offset, bytes, flags);
1413 if (!drv) {
1414 return -ENOMEDIUM;
1417 ret = bdrv_check_byte_request(bs, offset, bytes);
1418 if (ret < 0) {
1419 return ret;
1422 bdrv_inc_in_flight(bs);
1424 /* Don't do copy-on-read if we read data before write operation */
1425 if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
1426 flags |= BDRV_REQ_COPY_ON_READ;
1429 /* Align read if necessary by padding qiov */
1430 if (offset & (align - 1)) {
1431 head_buf = qemu_blockalign(bs, align);
1432 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1433 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1434 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1435 use_local_qiov = true;
1437 bytes += offset & (align - 1);
1438 offset = offset & ~(align - 1);
1441 if ((offset + bytes) & (align - 1)) {
1442 if (!use_local_qiov) {
1443 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1444 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1445 use_local_qiov = true;
1447 tail_buf = qemu_blockalign(bs, align);
1448 qemu_iovec_add(&local_qiov, tail_buf,
1449 align - ((offset + bytes) & (align - 1)));
1451 bytes = ROUND_UP(bytes, align);
1454 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1455 ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
1456 use_local_qiov ? &local_qiov : qiov,
1457 flags);
1458 tracked_request_end(&req);
1459 bdrv_dec_in_flight(bs);
1461 if (use_local_qiov) {
1462 qemu_iovec_destroy(&local_qiov);
1463 qemu_vfree(head_buf);
1464 qemu_vfree(tail_buf);
1467 return ret;
1470 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1471 int64_t offset, int bytes, BdrvRequestFlags flags)
1473 BlockDriver *drv = bs->drv;
1474 QEMUIOVector qiov;
1475 struct iovec iov = {0};
1476 int ret = 0;
1477 bool need_flush = false;
1478 int head = 0;
1479 int tail = 0;
1481 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1482 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1483 bs->bl.request_alignment);
1484 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1486 if (!drv) {
1487 return -ENOMEDIUM;
1490 assert(alignment % bs->bl.request_alignment == 0);
1491 head = offset % alignment;
1492 tail = (offset + bytes) % alignment;
1493 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1494 assert(max_write_zeroes >= bs->bl.request_alignment);
1496 while (bytes > 0 && !ret) {
1497 int num = bytes;
1499 /* Align request. Block drivers can expect the "bulk" of the request
1500 * to be aligned, and that unaligned requests do not cross cluster
1501 * boundaries.
1503 if (head) {
1504 /* Make a small request up to the first aligned sector. For
1505 * convenience, limit this request to max_transfer even if
1506 * we don't need to fall back to writes. */
1507 num = MIN(MIN(bytes, max_transfer), alignment - head);
1508 head = (head + num) % alignment;
1509 assert(num < max_write_zeroes);
1510 } else if (tail && num > alignment) {
1511 /* Shorten the request to the last aligned sector. */
1512 num -= tail;
1515 /* limit request size */
1516 if (num > max_write_zeroes) {
1517 num = max_write_zeroes;
1520 ret = -ENOTSUP;
1521 /* First try the efficient write zeroes operation */
1522 if (drv->bdrv_co_pwrite_zeroes) {
1523 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1524 flags & bs->supported_zero_flags);
1525 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1526 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1527 need_flush = true;
1529 } else {
1530 assert(!bs->supported_zero_flags);
1533 if (ret == -ENOTSUP) {
1534 /* Fall back to bounce buffer if write zeroes is unsupported */
1535 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1537 if ((flags & BDRV_REQ_FUA) &&
1538 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1539 /* No need for bdrv_driver_pwrite() to do a fallback
1540 * flush on each chunk; use just one at the end */
1541 write_flags &= ~BDRV_REQ_FUA;
1542 need_flush = true;
1544 num = MIN(num, max_transfer);
1545 iov.iov_len = num;
1546 if (iov.iov_base == NULL) {
1547 iov.iov_base = qemu_try_blockalign(bs, num);
1548 if (iov.iov_base == NULL) {
1549 ret = -ENOMEM;
1550 goto fail;
1552 memset(iov.iov_base, 0, num);
1554 qemu_iovec_init_external(&qiov, &iov, 1);
1556 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1558 /* Keep bounce buffer around if it is big enough for all
1559 * all future requests.
1561 if (num < max_transfer) {
1562 qemu_vfree(iov.iov_base);
1563 iov.iov_base = NULL;
1567 offset += num;
1568 bytes -= num;
1571 fail:
1572 if (ret == 0 && need_flush) {
1573 ret = bdrv_co_flush(bs);
1575 qemu_vfree(iov.iov_base);
1576 return ret;
1579 static inline int coroutine_fn
1580 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
1581 BdrvTrackedRequest *req, int flags)
1583 BlockDriverState *bs = child->bs;
1584 bool waited;
1585 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1587 if (bs->read_only) {
1588 return -EPERM;
1591 /* BDRV_REQ_NO_SERIALISING is only for read operation */
1592 assert(!(flags & BDRV_REQ_NO_SERIALISING));
1593 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1594 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1595 assert(!(flags & ~BDRV_REQ_MASK));
1597 if (flags & BDRV_REQ_SERIALISING) {
1598 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1601 waited = wait_serialising_requests(req);
1603 assert(!waited || !req->serialising ||
1604 is_request_serialising_and_aligned(req));
1605 assert(req->overlap_offset <= offset);
1606 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1608 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1609 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1610 } else {
1611 assert(child->perm & BLK_PERM_WRITE);
1613 assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
1614 return notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1617 static inline void coroutine_fn
1618 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
1619 BdrvTrackedRequest *req, int ret)
1621 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1622 BlockDriverState *bs = child->bs;
1624 atomic_inc(&bs->write_gen);
1627 * Discard cannot extend the image, but in error handling cases, such as
1628 * when reverting a qcow2 cluster allocation, the discarded range can pass
1629 * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
1630 * here. Instead, just skip it, since semantically a discard request
1631 * beyond EOF cannot expand the image anyway.
1633 if (ret == 0 &&
1634 end_sector > bs->total_sectors &&
1635 req->type != BDRV_TRACKED_DISCARD) {
1636 bs->total_sectors = end_sector;
1637 bdrv_parent_cb_resize(bs);
1638 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
1640 if (req->bytes) {
1641 switch (req->type) {
1642 case BDRV_TRACKED_WRITE:
1643 stat64_max(&bs->wr_highest_offset, offset + bytes);
1644 /* fall through, to set dirty bits */
1645 case BDRV_TRACKED_DISCARD:
1646 bdrv_set_dirty(bs, offset, bytes);
1647 break;
1648 default:
1649 break;
1655 * Forwards an already correctly aligned write request to the BlockDriver,
1656 * after possibly fragmenting it.
1658 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
1659 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1660 int64_t align, QEMUIOVector *qiov, int flags)
1662 BlockDriverState *bs = child->bs;
1663 BlockDriver *drv = bs->drv;
1664 int ret;
1666 uint64_t bytes_remaining = bytes;
1667 int max_transfer;
1669 if (!drv) {
1670 return -ENOMEDIUM;
1673 if (bdrv_has_readonly_bitmaps(bs)) {
1674 return -EPERM;
1677 assert(is_power_of_2(align));
1678 assert((offset & (align - 1)) == 0);
1679 assert((bytes & (align - 1)) == 0);
1680 assert(!qiov || bytes == qiov->size);
1681 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1682 align);
1684 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
1686 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1687 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1688 qemu_iovec_is_zero(qiov)) {
1689 flags |= BDRV_REQ_ZERO_WRITE;
1690 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1691 flags |= BDRV_REQ_MAY_UNMAP;
1695 if (ret < 0) {
1696 /* Do nothing, write notifier decided to fail this request */
1697 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1698 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1699 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1700 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1701 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
1702 } else if (bytes <= max_transfer) {
1703 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1704 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1705 } else {
1706 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1707 while (bytes_remaining) {
1708 int num = MIN(bytes_remaining, max_transfer);
1709 QEMUIOVector local_qiov;
1710 int local_flags = flags;
1712 assert(num);
1713 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1714 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1715 /* If FUA is going to be emulated by flush, we only
1716 * need to flush on the last iteration */
1717 local_flags &= ~BDRV_REQ_FUA;
1719 qemu_iovec_init(&local_qiov, qiov->niov);
1720 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1722 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1723 num, &local_qiov, local_flags);
1724 qemu_iovec_destroy(&local_qiov);
1725 if (ret < 0) {
1726 break;
1728 bytes_remaining -= num;
1731 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1733 if (ret >= 0) {
1734 ret = 0;
1736 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
1738 return ret;
1741 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
1742 int64_t offset,
1743 unsigned int bytes,
1744 BdrvRequestFlags flags,
1745 BdrvTrackedRequest *req)
1747 BlockDriverState *bs = child->bs;
1748 uint8_t *buf = NULL;
1749 QEMUIOVector local_qiov;
1750 struct iovec iov;
1751 uint64_t align = bs->bl.request_alignment;
1752 unsigned int head_padding_bytes, tail_padding_bytes;
1753 int ret = 0;
1755 head_padding_bytes = offset & (align - 1);
1756 tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
1759 assert(flags & BDRV_REQ_ZERO_WRITE);
1760 if (head_padding_bytes || tail_padding_bytes) {
1761 buf = qemu_blockalign(bs, align);
1762 iov = (struct iovec) {
1763 .iov_base = buf,
1764 .iov_len = align,
1766 qemu_iovec_init_external(&local_qiov, &iov, 1);
1768 if (head_padding_bytes) {
1769 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1771 /* RMW the unaligned part before head. */
1772 mark_request_serialising(req, align);
1773 wait_serialising_requests(req);
1774 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1775 ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
1776 align, &local_qiov, 0);
1777 if (ret < 0) {
1778 goto fail;
1780 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1782 memset(buf + head_padding_bytes, 0, zero_bytes);
1783 ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
1784 align, &local_qiov,
1785 flags & ~BDRV_REQ_ZERO_WRITE);
1786 if (ret < 0) {
1787 goto fail;
1789 offset += zero_bytes;
1790 bytes -= zero_bytes;
1793 assert(!bytes || (offset & (align - 1)) == 0);
1794 if (bytes >= align) {
1795 /* Write the aligned part in the middle. */
1796 uint64_t aligned_bytes = bytes & ~(align - 1);
1797 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
1798 NULL, flags);
1799 if (ret < 0) {
1800 goto fail;
1802 bytes -= aligned_bytes;
1803 offset += aligned_bytes;
1806 assert(!bytes || (offset & (align - 1)) == 0);
1807 if (bytes) {
1808 assert(align == tail_padding_bytes + bytes);
1809 /* RMW the unaligned part after tail. */
1810 mark_request_serialising(req, align);
1811 wait_serialising_requests(req);
1812 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1813 ret = bdrv_aligned_preadv(child, req, offset, align,
1814 align, &local_qiov, 0);
1815 if (ret < 0) {
1816 goto fail;
1818 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1820 memset(buf, 0, bytes);
1821 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
1822 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1824 fail:
1825 qemu_vfree(buf);
1826 return ret;
1831 * Handle a write request in coroutine context
1833 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1834 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1835 BdrvRequestFlags flags)
1837 BlockDriverState *bs = child->bs;
1838 BdrvTrackedRequest req;
1839 uint64_t align = bs->bl.request_alignment;
1840 uint8_t *head_buf = NULL;
1841 uint8_t *tail_buf = NULL;
1842 QEMUIOVector local_qiov;
1843 bool use_local_qiov = false;
1844 int ret;
1846 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
1848 if (!bs->drv) {
1849 return -ENOMEDIUM;
1852 ret = bdrv_check_byte_request(bs, offset, bytes);
1853 if (ret < 0) {
1854 return ret;
1857 bdrv_inc_in_flight(bs);
1859 * Align write if necessary by performing a read-modify-write cycle.
1860 * Pad qiov with the read parts and be sure to have a tracked request not
1861 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1863 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1865 if (flags & BDRV_REQ_ZERO_WRITE) {
1866 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
1867 goto out;
1870 if (offset & (align - 1)) {
1871 QEMUIOVector head_qiov;
1872 struct iovec head_iov;
1874 mark_request_serialising(&req, align);
1875 wait_serialising_requests(&req);
1877 head_buf = qemu_blockalign(bs, align);
1878 head_iov = (struct iovec) {
1879 .iov_base = head_buf,
1880 .iov_len = align,
1882 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1884 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1885 ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
1886 align, &head_qiov, 0);
1887 if (ret < 0) {
1888 goto fail;
1890 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1892 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1893 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1894 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1895 use_local_qiov = true;
1897 bytes += offset & (align - 1);
1898 offset = offset & ~(align - 1);
1900 /* We have read the tail already if the request is smaller
1901 * than one aligned block.
1903 if (bytes < align) {
1904 qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1905 bytes = align;
1909 if ((offset + bytes) & (align - 1)) {
1910 QEMUIOVector tail_qiov;
1911 struct iovec tail_iov;
1912 size_t tail_bytes;
1913 bool waited;
1915 mark_request_serialising(&req, align);
1916 waited = wait_serialising_requests(&req);
1917 assert(!waited || !use_local_qiov);
1919 tail_buf = qemu_blockalign(bs, align);
1920 tail_iov = (struct iovec) {
1921 .iov_base = tail_buf,
1922 .iov_len = align,
1924 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1926 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1927 ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
1928 align, align, &tail_qiov, 0);
1929 if (ret < 0) {
1930 goto fail;
1932 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1934 if (!use_local_qiov) {
1935 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1936 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1937 use_local_qiov = true;
1940 tail_bytes = (offset + bytes) & (align - 1);
1941 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1943 bytes = ROUND_UP(bytes, align);
1946 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
1947 use_local_qiov ? &local_qiov : qiov,
1948 flags);
1950 fail:
1952 if (use_local_qiov) {
1953 qemu_iovec_destroy(&local_qiov);
1955 qemu_vfree(head_buf);
1956 qemu_vfree(tail_buf);
1957 out:
1958 tracked_request_end(&req);
1959 bdrv_dec_in_flight(bs);
1960 return ret;
1963 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1964 int bytes, BdrvRequestFlags flags)
1966 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
1968 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
1969 flags &= ~BDRV_REQ_MAY_UNMAP;
1972 return bdrv_co_pwritev(child, offset, bytes, NULL,
1973 BDRV_REQ_ZERO_WRITE | flags);
1977 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
1979 int bdrv_flush_all(void)
1981 BdrvNextIterator it;
1982 BlockDriverState *bs = NULL;
1983 int result = 0;
1985 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
1986 AioContext *aio_context = bdrv_get_aio_context(bs);
1987 int ret;
1989 aio_context_acquire(aio_context);
1990 ret = bdrv_flush(bs);
1991 if (ret < 0 && !result) {
1992 result = ret;
1994 aio_context_release(aio_context);
1997 return result;
2001 typedef struct BdrvCoBlockStatusData {
2002 BlockDriverState *bs;
2003 BlockDriverState *base;
2004 bool want_zero;
2005 int64_t offset;
2006 int64_t bytes;
2007 int64_t *pnum;
2008 int64_t *map;
2009 BlockDriverState **file;
2010 int ret;
2011 bool done;
2012 } BdrvCoBlockStatusData;
2014 int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
2015 bool want_zero,
2016 int64_t offset,
2017 int64_t bytes,
2018 int64_t *pnum,
2019 int64_t *map,
2020 BlockDriverState **file)
2022 assert(bs->file && bs->file->bs);
2023 *pnum = bytes;
2024 *map = offset;
2025 *file = bs->file->bs;
2026 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2029 int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
2030 bool want_zero,
2031 int64_t offset,
2032 int64_t bytes,
2033 int64_t *pnum,
2034 int64_t *map,
2035 BlockDriverState **file)
2037 assert(bs->backing && bs->backing->bs);
2038 *pnum = bytes;
2039 *map = offset;
2040 *file = bs->backing->bs;
2041 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2045 * Returns the allocation status of the specified sectors.
2046 * Drivers not implementing the functionality are assumed to not support
2047 * backing files, hence all their sectors are reported as allocated.
2049 * If 'want_zero' is true, the caller is querying for mapping
2050 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2051 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2052 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2054 * If 'offset' is beyond the end of the disk image the return value is
2055 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2057 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2058 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2059 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2061 * 'pnum' is set to the number of bytes (including and immediately
2062 * following the specified offset) that are easily known to be in the
2063 * same allocated/unallocated state. Note that a second call starting
2064 * at the original offset plus returned pnum may have the same status.
2065 * The returned value is non-zero on success except at end-of-file.
2067 * Returns negative errno on failure. Otherwise, if the
2068 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2069 * set to the host mapping and BDS corresponding to the guest offset.
2071 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2072 bool want_zero,
2073 int64_t offset, int64_t bytes,
2074 int64_t *pnum, int64_t *map,
2075 BlockDriverState **file)
2077 int64_t total_size;
2078 int64_t n; /* bytes */
2079 int ret;
2080 int64_t local_map = 0;
2081 BlockDriverState *local_file = NULL;
2082 int64_t aligned_offset, aligned_bytes;
2083 uint32_t align;
2085 assert(pnum);
2086 *pnum = 0;
2087 total_size = bdrv_getlength(bs);
2088 if (total_size < 0) {
2089 ret = total_size;
2090 goto early_out;
2093 if (offset >= total_size) {
2094 ret = BDRV_BLOCK_EOF;
2095 goto early_out;
2097 if (!bytes) {
2098 ret = 0;
2099 goto early_out;
2102 n = total_size - offset;
2103 if (n < bytes) {
2104 bytes = n;
2107 /* Must be non-NULL or bdrv_getlength() would have failed */
2108 assert(bs->drv);
2109 if (!bs->drv->bdrv_co_block_status) {
2110 *pnum = bytes;
2111 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2112 if (offset + bytes == total_size) {
2113 ret |= BDRV_BLOCK_EOF;
2115 if (bs->drv->protocol_name) {
2116 ret |= BDRV_BLOCK_OFFSET_VALID;
2117 local_map = offset;
2118 local_file = bs;
2120 goto early_out;
2123 bdrv_inc_in_flight(bs);
2125 /* Round out to request_alignment boundaries */
2126 align = bs->bl.request_alignment;
2127 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2128 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2130 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2131 aligned_bytes, pnum, &local_map,
2132 &local_file);
2133 if (ret < 0) {
2134 *pnum = 0;
2135 goto out;
2139 * The driver's result must be a non-zero multiple of request_alignment.
2140 * Clamp pnum and adjust map to original request.
2142 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2143 align > offset - aligned_offset);
2144 *pnum -= offset - aligned_offset;
2145 if (*pnum > bytes) {
2146 *pnum = bytes;
2148 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2149 local_map += offset - aligned_offset;
2152 if (ret & BDRV_BLOCK_RAW) {
2153 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2154 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2155 *pnum, pnum, &local_map, &local_file);
2156 goto out;
2159 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2160 ret |= BDRV_BLOCK_ALLOCATED;
2161 } else if (want_zero) {
2162 if (bdrv_unallocated_blocks_are_zero(bs)) {
2163 ret |= BDRV_BLOCK_ZERO;
2164 } else if (bs->backing) {
2165 BlockDriverState *bs2 = bs->backing->bs;
2166 int64_t size2 = bdrv_getlength(bs2);
2168 if (size2 >= 0 && offset >= size2) {
2169 ret |= BDRV_BLOCK_ZERO;
2174 if (want_zero && local_file && local_file != bs &&
2175 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2176 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2177 int64_t file_pnum;
2178 int ret2;
2180 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2181 *pnum, &file_pnum, NULL, NULL);
2182 if (ret2 >= 0) {
2183 /* Ignore errors. This is just providing extra information; it
2184 * is useful but not necessary.
2186 if (ret2 & BDRV_BLOCK_EOF &&
2187 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2189 * It is valid for the format block driver to read
2190 * beyond the end of the underlying file's current
2191 * size; such areas read as zero.
2193 ret |= BDRV_BLOCK_ZERO;
2194 } else {
2195 /* Limit request to the range reported by the protocol driver */
2196 *pnum = file_pnum;
2197 ret |= (ret2 & BDRV_BLOCK_ZERO);
2202 out:
2203 bdrv_dec_in_flight(bs);
2204 if (ret >= 0 && offset + *pnum == total_size) {
2205 ret |= BDRV_BLOCK_EOF;
2207 early_out:
2208 if (file) {
2209 *file = local_file;
2211 if (map) {
2212 *map = local_map;
2214 return ret;
2217 static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2218 BlockDriverState *base,
2219 bool want_zero,
2220 int64_t offset,
2221 int64_t bytes,
2222 int64_t *pnum,
2223 int64_t *map,
2224 BlockDriverState **file)
2226 BlockDriverState *p;
2227 int ret = 0;
2228 bool first = true;
2230 assert(bs != base);
2231 for (p = bs; p != base; p = backing_bs(p)) {
2232 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2233 file);
2234 if (ret < 0) {
2235 break;
2237 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2239 * Reading beyond the end of the file continues to read
2240 * zeroes, but we can only widen the result to the
2241 * unallocated length we learned from an earlier
2242 * iteration.
2244 *pnum = bytes;
2246 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
2247 break;
2249 /* [offset, pnum] unallocated on this layer, which could be only
2250 * the first part of [offset, bytes]. */
2251 bytes = MIN(bytes, *pnum);
2252 first = false;
2254 return ret;
2257 /* Coroutine wrapper for bdrv_block_status_above() */
2258 static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
2260 BdrvCoBlockStatusData *data = opaque;
2262 data->ret = bdrv_co_block_status_above(data->bs, data->base,
2263 data->want_zero,
2264 data->offset, data->bytes,
2265 data->pnum, data->map, data->file);
2266 data->done = true;
2270 * Synchronous wrapper around bdrv_co_block_status_above().
2272 * See bdrv_co_block_status_above() for details.
2274 static int bdrv_common_block_status_above(BlockDriverState *bs,
2275 BlockDriverState *base,
2276 bool want_zero, int64_t offset,
2277 int64_t bytes, int64_t *pnum,
2278 int64_t *map,
2279 BlockDriverState **file)
2281 Coroutine *co;
2282 BdrvCoBlockStatusData data = {
2283 .bs = bs,
2284 .base = base,
2285 .want_zero = want_zero,
2286 .offset = offset,
2287 .bytes = bytes,
2288 .pnum = pnum,
2289 .map = map,
2290 .file = file,
2291 .done = false,
2294 if (qemu_in_coroutine()) {
2295 /* Fast-path if already in coroutine context */
2296 bdrv_block_status_above_co_entry(&data);
2297 } else {
2298 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
2299 bdrv_coroutine_enter(bs, co);
2300 BDRV_POLL_WHILE(bs, !data.done);
2302 return data.ret;
2305 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2306 int64_t offset, int64_t bytes, int64_t *pnum,
2307 int64_t *map, BlockDriverState **file)
2309 return bdrv_common_block_status_above(bs, base, true, offset, bytes,
2310 pnum, map, file);
2313 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2314 int64_t *pnum, int64_t *map, BlockDriverState **file)
2316 return bdrv_block_status_above(bs, backing_bs(bs),
2317 offset, bytes, pnum, map, file);
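/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * walk an image and classify each byte range.  bdrv_block_status() clamps
 * *pnum at end of file, so the loop makes forward progress until 'total'.
 */
static void example_walk_status(BlockDriverState *bs, int64_t total)
{
    int64_t offset = 0;

    while (offset < total) {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_block_status(bs, offset, total - offset,
                                    &pnum, &map, &file);
        if (ret < 0 || pnum == 0) {
            break;
        }
        if (ret & BDRV_BLOCK_DATA) {
            /* [offset, offset + pnum) carries data in this layer */
        }
        offset += pnum;
    }
}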
2320 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2321 int64_t bytes, int64_t *pnum)
2323 int ret;
2324 int64_t dummy;
2326 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
2327 bytes, pnum ? pnum : &dummy, NULL,
2328 NULL);
2329 if (ret < 0) {
2330 return ret;
2332 return !!(ret & BDRV_BLOCK_ALLOCATED);
2336 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2338 * Return true if (a prefix of) the given range is allocated in any image
2339 * between BASE and TOP (inclusive). BASE can be NULL to check if the given
2340 * offset is allocated in any image of the chain. Return false otherwise,
2341 * or negative errno on failure.
2343 * 'pnum' is set to the number of bytes (including and immediately
2344 * following the specified offset) that are known to be in the same
2345 * allocated/unallocated state. Note that a subsequent call starting
2346 * at 'offset + *pnum' may return the same allocation status (in other
2347 * words, the result is not necessarily the maximum possible range);
2348 * but 'pnum' will only be 0 when end of file is reached.
2351 int bdrv_is_allocated_above(BlockDriverState *top,
2352 BlockDriverState *base,
2353 int64_t offset, int64_t bytes, int64_t *pnum)
2355 BlockDriverState *intermediate;
2356 int ret;
2357 int64_t n = bytes;
2359 intermediate = top;
2360 while (intermediate && intermediate != base) {
2361 int64_t pnum_inter;
2362 int64_t size_inter;
2364 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
2365 if (ret < 0) {
2366 return ret;
2368 if (ret) {
2369 *pnum = pnum_inter;
2370 return 1;
2373 size_inter = bdrv_getlength(intermediate);
2374 if (size_inter < 0) {
2375 return size_inter;
2377 if (n > pnum_inter &&
2378 (intermediate == top || offset + pnum_inter < size_inter)) {
2379 n = pnum_inter;
2382 intermediate = backing_bs(intermediate);
2385 *pnum = n;
2386 return 0;
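/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * check whether the start of a range is backed anywhere between 'top' and
 * 'base' in the backing chain.
 */
static int example_range_backed(BlockDriverState *top, BlockDriverState *base,
                                int64_t offset, int64_t bytes)
{
    int64_t pnum;
    int ret = bdrv_is_allocated_above(top, base, offset, bytes, &pnum);

    /* ret > 0: a prefix of 'pnum' bytes is allocated in some layer;
     * ret == 0: the first 'pnum' bytes are unallocated in every layer;
     * ret < 0: error */
    return ret;
}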
2389 typedef struct BdrvVmstateCo {
2390 BlockDriverState *bs;
2391 QEMUIOVector *qiov;
2392 int64_t pos;
2393 bool is_read;
2394 int ret;
2395 } BdrvVmstateCo;
2397 static int coroutine_fn
2398 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2399 bool is_read)
2401 BlockDriver *drv = bs->drv;
2402 int ret = -ENOTSUP;
2404 bdrv_inc_in_flight(bs);
2406 if (!drv) {
2407 ret = -ENOMEDIUM;
2408 } else if (drv->bdrv_load_vmstate) {
2409 if (is_read) {
2410 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2411 } else {
2412 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2414 } else if (bs->file) {
2415 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
2418 bdrv_dec_in_flight(bs);
2419 return ret;
2422 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2424 BdrvVmstateCo *co = opaque;
2425 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2428 static inline int
2429 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2430 bool is_read)
2432 if (qemu_in_coroutine()) {
2433 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2434 } else {
2435 BdrvVmstateCo data = {
2436 .bs = bs,
2437 .qiov = qiov,
2438 .pos = pos,
2439 .is_read = is_read,
2440 .ret = -EINPROGRESS,
2442 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
2444 bdrv_coroutine_enter(bs, co);
2445 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
2446 return data.ret;
2450 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2451 int64_t pos, int size)
2453 QEMUIOVector qiov;
2454 struct iovec iov = {
2455 .iov_base = (void *) buf,
2456 .iov_len = size,
2458 int ret;
2460 qemu_iovec_init_external(&qiov, &iov, 1);
2462 ret = bdrv_writev_vmstate(bs, &qiov, pos);
2463 if (ret < 0) {
2464 return ret;
2467 return size;
2470 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2472 return bdrv_rw_vmstate(bs, qiov, pos, false);
2475 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2476 int64_t pos, int size)
2478 QEMUIOVector qiov;
2479 struct iovec iov = {
2480 .iov_base = buf,
2481 .iov_len = size,
2483 int ret;
2485 qemu_iovec_init_external(&qiov, &iov, 1);
2486 ret = bdrv_readv_vmstate(bs, &qiov, pos);
2487 if (ret < 0) {
2488 return ret;
2491 return size;
2494 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2496 return bdrv_rw_vmstate(bs, qiov, pos, true);
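/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * store a blob of VM state at position 0 and read it back.  Both wrappers
 * return the byte count on success and a negative errno on failure.
 */
static int example_roundtrip_vmstate(BlockDriverState *bs,
                                     uint8_t *buf, int size)
{
    int ret = bdrv_save_vmstate(bs, buf, 0, size);
    if (ret < 0) {
        return ret;
    }
    return bdrv_load_vmstate(bs, buf, 0, size);
}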
2499 /**************************************************************/
2500 /* async I/Os */
2502 void bdrv_aio_cancel(BlockAIOCB *acb)
2504 qemu_aio_ref(acb);
2505 bdrv_aio_cancel_async(acb);
2506 while (acb->refcnt > 1) {
2507 if (acb->aiocb_info->get_aio_context) {
2508 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2509 } else if (acb->bs) {
2510 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2511 * assert that we're not using an I/O thread. Thread-safe
2512 * code should use bdrv_aio_cancel_async exclusively.
2514 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2515 aio_poll(bdrv_get_aio_context(acb->bs), true);
2516 } else {
2517 abort();
2520 qemu_aio_unref(acb);
2523 /* Async version of aio cancel. The caller is not blocked if the acb implements
2524 * cancel_async; otherwise we do nothing and let the request complete normally.
2525 * In either case the completion callback must be called. */
2526 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2528 if (acb->aiocb_info->cancel_async) {
2529 acb->aiocb_info->cancel_async(acb);
2533 /**************************************************************/
2534 /* Coroutine block device emulation */
2536 typedef struct FlushCo {
2537 BlockDriverState *bs;
2538 int ret;
2539 } FlushCo;
2542 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2544 FlushCo *rwco = opaque;
2546 rwco->ret = bdrv_co_flush(rwco->bs);
2549 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2551 int current_gen;
2552 int ret = 0;
2554 bdrv_inc_in_flight(bs);
2556 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2557 bdrv_is_sg(bs)) {
2558 goto early_exit;
2561 qemu_co_mutex_lock(&bs->reqs_lock);
2562 current_gen = atomic_read(&bs->write_gen);
2564 /* Wait until any previous flushes are completed */
2565 while (bs->active_flush_req) {
2566 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2569 /* Flushes reach this point in nondecreasing current_gen order. */
2570 bs->active_flush_req = true;
2571 qemu_co_mutex_unlock(&bs->reqs_lock);
2573 /* Write back all layers by calling one driver function */
2574 if (bs->drv->bdrv_co_flush) {
2575 ret = bs->drv->bdrv_co_flush(bs);
2576 goto out;
2579 /* Write back cached data to the OS even with cache=unsafe */
2580 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2581 if (bs->drv->bdrv_co_flush_to_os) {
2582 ret = bs->drv->bdrv_co_flush_to_os(bs);
2583 if (ret < 0) {
2584 goto out;
2588 /* But don't actually force it to the disk with cache=unsafe */
2589 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2590 goto flush_parent;
2593 /* Check if we really need to flush anything */
2594 if (bs->flushed_gen == current_gen) {
2595 goto flush_parent;
2598 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2599 if (!bs->drv) {
2600 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2601 * (even in case of apparent success) */
2602 ret = -ENOMEDIUM;
2603 goto out;
2605 if (bs->drv->bdrv_co_flush_to_disk) {
2606 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2607 } else if (bs->drv->bdrv_aio_flush) {
2608 BlockAIOCB *acb;
2609 CoroutineIOCompletion co = {
2610 .coroutine = qemu_coroutine_self(),
2613 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2614 if (acb == NULL) {
2615 ret = -EIO;
2616 } else {
2617 qemu_coroutine_yield();
2618 ret = co.ret;
2620 } else {
2622 * Some block drivers always operate in either writethrough or unsafe
2623 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2624 * know how the server works (because the behaviour is hardcoded or
2625 * depends on server-side configuration), so we can't ensure that
2626 * everything is safe on disk. Returning an error doesn't work because
2627 * that would break guests even if the server operates in writethrough
2628 * mode.
2630 * Let's hope the user knows what they're doing.
2632 ret = 0;
2635 if (ret < 0) {
2636 goto out;
2639 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2640 * in the case of cache=unsafe, so there are no useless flushes.
2642 flush_parent:
2643 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2644 out:
2645 /* Notify any pending flushes that we have completed */
2646 if (ret == 0) {
2647 bs->flushed_gen = current_gen;
2650 qemu_co_mutex_lock(&bs->reqs_lock);
2651 bs->active_flush_req = false;
2652 /* Return value is ignored - it's ok if wait queue is empty */
2653 qemu_co_queue_next(&bs->flush_queue);
2654 qemu_co_mutex_unlock(&bs->reqs_lock);
2656 early_exit:
2657 bdrv_dec_in_flight(bs);
2658 return ret;
2661 int bdrv_flush(BlockDriverState *bs)
2663 Coroutine *co;
2664 FlushCo flush_co = {
2665 .bs = bs,
2666 .ret = NOT_DONE,
2669 if (qemu_in_coroutine()) {
2670 /* Fast-path if already in coroutine context */
2671 bdrv_flush_co_entry(&flush_co);
2672 } else {
2673 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2674 bdrv_coroutine_enter(bs, co);
2675 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2678 return flush_co.ret;
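/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * commit completed writes to stable storage and surface any error.
 */
static int example_commit_writes(BlockDriverState *bs)
{
    int ret = bdrv_flush(bs);
    if (ret < 0) {
        /* flushed_gen only advances on success, so a retry after a failed
         * flush really flushes again instead of being skipped */
        return ret;
    }
    return 0;
}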
2681 typedef struct DiscardCo {
2682 BdrvChild *child;
2683 int64_t offset;
2684 int bytes;
2685 int ret;
2686 } DiscardCo;
2687 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2689 DiscardCo *rwco = opaque;
2691 rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
2694 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes)
2696 BdrvTrackedRequest req;
2697 int max_pdiscard, ret;
2698 int head, tail, align;
2699 BlockDriverState *bs = child->bs;
2701 if (!bs || !bs->drv) {
2702 return -ENOMEDIUM;
2705 if (bdrv_has_readonly_bitmaps(bs)) {
2706 return -EPERM;
2709 ret = bdrv_check_byte_request(bs, offset, bytes);
2710 if (ret < 0) {
2711 return ret;
2714 /* Do nothing if disabled. */
2715 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2716 return 0;
2719 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2720 return 0;
2723 /* Discard is advisory, but some devices track and coalesce
2724 * unaligned requests, so we must pass everything down rather than
2725 * round here. Still, most devices will just silently ignore
2726 * unaligned requests (by returning -ENOTSUP), so we must fragment
2727 * the request accordingly. */
2728 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2729 assert(align % bs->bl.request_alignment == 0);
2730 head = offset % align;
2731 tail = (offset + bytes) % align;
2733 bdrv_inc_in_flight(bs);
2734 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
2736 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
2737 if (ret < 0) {
2738 goto out;
2741 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2742 align);
2743 assert(max_pdiscard >= bs->bl.request_alignment);
2745 while (bytes > 0) {
2746 int num = bytes;
2748 if (head) {
2749 /* Make small requests to get to alignment boundaries. */
2750 num = MIN(bytes, align - head);
2751 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2752 num %= bs->bl.request_alignment;
2754 head = (head + num) % align;
2755 assert(num < max_pdiscard);
2756 } else if (tail) {
2757 if (num > align) {
2758 /* Shorten the request to the last aligned cluster. */
2759 num -= tail;
2760 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2761 tail > bs->bl.request_alignment) {
2762 tail %= bs->bl.request_alignment;
2763 num -= tail;
2766 /* limit request size */
2767 if (num > max_pdiscard) {
2768 num = max_pdiscard;
2771 if (!bs->drv) {
2772 ret = -ENOMEDIUM;
2773 goto out;
2775 if (bs->drv->bdrv_co_pdiscard) {
2776 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2777 } else {
2778 BlockAIOCB *acb;
2779 CoroutineIOCompletion co = {
2780 .coroutine = qemu_coroutine_self(),
2783 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2784 bdrv_co_io_em_complete, &co);
2785 if (acb == NULL) {
2786 ret = -EIO;
2787 goto out;
2788 } else {
2789 qemu_coroutine_yield();
2790 ret = co.ret;
2793 if (ret && ret != -ENOTSUP) {
2794 goto out;
2797 offset += num;
2798 bytes -= num;
2800 ret = 0;
2801 out:
2802 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
2803 tracked_request_end(&req);
2804 bdrv_dec_in_flight(bs);
2805 return ret;
2808 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes)
2810 Coroutine *co;
2811 DiscardCo rwco = {
2812 .child = child,
2813 .offset = offset,
2814 .bytes = bytes,
2815 .ret = NOT_DONE,
2818 if (qemu_in_coroutine()) {
2819 /* Fast-path if already in coroutine context */
2820 bdrv_pdiscard_co_entry(&rwco);
2821 } else {
2822 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2823 bdrv_coroutine_enter(child->bs, co);
2824 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
2827 return rwco.ret;
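/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * advise the driver that a range is no longer needed.  Discard is advisory;
 * it is silently skipped without BDRV_O_UNMAP and success does not guarantee
 * the range was actually unmapped.
 */
static int example_discard_range(BdrvChild *child, int64_t offset, int bytes)
{
    /* no caller-side alignment needed: bdrv_co_pdiscard() fragments the
     * request on pdiscard_alignment boundaries itself */
    return bdrv_pdiscard(child, offset, bytes);
}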
2830 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
2832 BlockDriver *drv = bs->drv;
2833 CoroutineIOCompletion co = {
2834 .coroutine = qemu_coroutine_self(),
2836 BlockAIOCB *acb;
2838 bdrv_inc_in_flight(bs);
2839 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
2840 co.ret = -ENOTSUP;
2841 goto out;
2844 if (drv->bdrv_co_ioctl) {
2845 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2846 } else {
2847 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2848 if (!acb) {
2849 co.ret = -ENOTSUP;
2850 goto out;
2852 qemu_coroutine_yield();
2854 out:
2855 bdrv_dec_in_flight(bs);
2856 return co.ret;
2859 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2861 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2864 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2866 return memset(qemu_blockalign(bs, size), 0, size);
2869 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2871 size_t align = bdrv_opt_mem_align(bs);
2873 /* Ensure that NULL is never returned on success */
2874 assert(align > 0);
2875 if (size == 0) {
2876 size = align;
2879 return qemu_try_memalign(align, size);
2882 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2884 void *mem = qemu_try_blockalign(bs, size);
2886 if (mem) {
2887 memset(mem, 0, size);
2890 return mem;
2894 * Check if all memory in this vector is aligned to the minimum memory alignment of the BDS.
2896 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2898 int i;
2899 size_t alignment = bdrv_min_mem_align(bs);
2901 for (i = 0; i < qiov->niov; i++) {
2902 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2903 return false;
2905 if (qiov->iov[i].iov_len % alignment) {
2906 return false;
2910 return true;
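/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * allocate a zeroed bounce buffer intended to satisfy bdrv_qiov_is_aligned(),
 * e.g. before O_DIRECT-style I/O.
 */
static void *example_alloc_bounce(BlockDriverState *bs, size_t size)
{
    /* aligned to bdrv_opt_mem_align(bs); returns NULL on allocation failure */
    return qemu_try_blockalign0(bs, size);
}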
2913 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2914 NotifierWithReturn *notifier)
2916 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2919 void bdrv_io_plug(BlockDriverState *bs)
2921 BdrvChild *child;
2923 QLIST_FOREACH(child, &bs->children, next) {
2924 bdrv_io_plug(child->bs);
2927 if (atomic_fetch_inc(&bs->io_plugged) == 0) {
2928 BlockDriver *drv = bs->drv;
2929 if (drv && drv->bdrv_io_plug) {
2930 drv->bdrv_io_plug(bs);
2935 void bdrv_io_unplug(BlockDriverState *bs)
2937 BdrvChild *child;
2939 assert(bs->io_plugged);
2940 if (atomic_fetch_dec(&bs->io_plugged) == 1) {
2941 BlockDriver *drv = bs->drv;
2942 if (drv && drv->bdrv_io_unplug) {
2943 drv->bdrv_io_unplug(bs);
2947 QLIST_FOREACH(child, &bs->children, next) {
2948 bdrv_io_unplug(child->bs);
2952 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
2954 BdrvChild *child;
2956 if (bs->drv && bs->drv->bdrv_register_buf) {
2957 bs->drv->bdrv_register_buf(bs, host, size);
2959 QLIST_FOREACH(child, &bs->children, next) {
2960 bdrv_register_buf(child->bs, host, size);
2964 void bdrv_unregister_buf(BlockDriverState *bs, void *host)
2966 BdrvChild *child;
2968 if (bs->drv && bs->drv->bdrv_unregister_buf) {
2969 bs->drv->bdrv_unregister_buf(bs, host);
2971 QLIST_FOREACH(child, &bs->children, next) {
2972 bdrv_unregister_buf(child->bs, host);
2976 static int coroutine_fn bdrv_co_copy_range_internal(
2977 BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
2978 uint64_t dst_offset, uint64_t bytes,
2979 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
2980 bool recurse_src)
2982 BdrvTrackedRequest req;
2983 int ret;
2985 if (!dst || !dst->bs) {
2986 return -ENOMEDIUM;
2988 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
2989 if (ret) {
2990 return ret;
2992 if (write_flags & BDRV_REQ_ZERO_WRITE) {
2993 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
2996 if (!src || !src->bs) {
2997 return -ENOMEDIUM;
2999 ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
3000 if (ret) {
3001 return ret;
3004 if (!src->bs->drv->bdrv_co_copy_range_from
3005 || !dst->bs->drv->bdrv_co_copy_range_to
3006 || src->bs->encrypted || dst->bs->encrypted) {
3007 return -ENOTSUP;
3010 if (recurse_src) {
3011 bdrv_inc_in_flight(src->bs);
3012 tracked_request_begin(&req, src->bs, src_offset, bytes,
3013 BDRV_TRACKED_READ);
3015 /* BDRV_REQ_SERIALISING is only for write operations */
3016 assert(!(read_flags & BDRV_REQ_SERIALISING));
3017 if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
3018 wait_serialising_requests(&req);
3021 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3022 src, src_offset,
3023 dst, dst_offset,
3024 bytes,
3025 read_flags, write_flags);
3027 tracked_request_end(&req);
3028 bdrv_dec_in_flight(src->bs);
3029 } else {
3030 bdrv_inc_in_flight(dst->bs);
3031 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3032 BDRV_TRACKED_WRITE);
3033 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3034 write_flags);
3035 if (!ret) {
3036 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3037 src, src_offset,
3038 dst, dst_offset,
3039 bytes,
3040 read_flags, write_flags);
3042 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3043 tracked_request_end(&req);
3044 bdrv_dec_in_flight(dst->bs);
3047 return ret;
3050 /* Copy range from @src to @dst.
3052 * See the comment of bdrv_co_copy_range for the parameter and return value
3053 * semantics. */
3054 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3055 BdrvChild *dst, uint64_t dst_offset,
3056 uint64_t bytes,
3057 BdrvRequestFlags read_flags,
3058 BdrvRequestFlags write_flags)
3060 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3061 read_flags, write_flags);
3062 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3063 bytes, read_flags, write_flags, true);
3066 /* Copy range from @src to @dst.
3068 * See the comment of bdrv_co_copy_range for the parameter and return value
3069 * semantics. */
3070 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3071 BdrvChild *dst, uint64_t dst_offset,
3072 uint64_t bytes,
3073 BdrvRequestFlags read_flags,
3074 BdrvRequestFlags write_flags)
3076 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3077 read_flags, write_flags);
3078 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3079 bytes, read_flags, write_flags, false);
3082 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3083 BdrvChild *dst, uint64_t dst_offset,
3084 uint64_t bytes, BdrvRequestFlags read_flags,
3085 BdrvRequestFlags write_flags)
3087 return bdrv_co_copy_range_from(src, src_offset,
3088 dst, dst_offset,
3089 bytes, read_flags, write_flags);
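/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * attempt a copy offload between two children at the same offset.  Must run
 * in coroutine context.
 */
static int coroutine_fn example_try_copy_offload(BdrvChild *src, BdrvChild *dst,
                                                 uint64_t offset, uint64_t bytes)
{
    /* -ENOTSUP means the drivers cannot offload this pair; the caller is
     * expected to fall back to a regular read/write bounce */
    return bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);
}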
3092 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3094 BdrvChild *c;
3095 QLIST_FOREACH(c, &bs->parents, next_parent) {
3096 if (c->role->resize) {
3097 c->role->resize(c);
3103 * Truncate file to 'offset' bytes (needed only for file protocols)
3105 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
3106 PreallocMode prealloc, Error **errp)
3108 BlockDriverState *bs = child->bs;
3109 BlockDriver *drv = bs->drv;
3110 BdrvTrackedRequest req;
3111 int64_t old_size, new_bytes;
3112 int ret;
3114 assert(child->perm & BLK_PERM_RESIZE);
3116 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3117 if (!drv) {
3118 error_setg(errp, "No medium inserted");
3119 return -ENOMEDIUM;
3121 if (offset < 0) {
3122 error_setg(errp, "Image size cannot be negative");
3123 return -EINVAL;
3126 old_size = bdrv_getlength(bs);
3127 if (old_size < 0) {
3128 error_setg_errno(errp, -old_size, "Failed to get old image size");
3129 return old_size;
3132 if (offset > old_size) {
3133 new_bytes = offset - old_size;
3134 } else {
3135 new_bytes = 0;
3138 bdrv_inc_in_flight(bs);
3139 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3140 BDRV_TRACKED_TRUNCATE);
3142 /* If we are growing the image and potentially using preallocation for the
3143 * new area, we need to make sure that no write requests are made to it
3144 * concurrently or they might be overwritten by preallocation. */
3145 if (new_bytes) {
3146 mark_request_serialising(&req, 1);
3147 wait_serialising_requests(&req);
3150 if (!drv->bdrv_co_truncate) {
3151 if (bs->file && drv->is_filter) {
3152 ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
3153 goto out;
3155 error_setg(errp, "Image format driver does not support resize");
3156 ret = -ENOTSUP;
3157 goto out;
3159 if (bs->read_only) {
3160 error_setg(errp, "Image is read-only");
3161 ret = -EACCES;
3162 goto out;
3165 assert(!(bs->open_flags & BDRV_O_INACTIVE));
3167 ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
3168 if (ret < 0) {
3169 goto out;
3171 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3172 if (ret < 0) {
3173 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3174 } else {
3175 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3177 bdrv_dirty_bitmap_truncate(bs, offset);
3178 bdrv_parent_cb_resize(bs);
3179 atomic_inc(&bs->write_gen);
3181 out:
3182 tracked_request_end(&req);
3183 bdrv_dec_in_flight(bs);
3185 return ret;
3188 typedef struct TruncateCo {
3189 BdrvChild *child;
3190 int64_t offset;
3191 PreallocMode prealloc;
3192 Error **errp;
3193 int ret;
3194 } TruncateCo;
3196 static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
3198 TruncateCo *tco = opaque;
3199 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
3200 tco->errp);
3203 int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
3204 Error **errp)
3206 Coroutine *co;
3207 TruncateCo tco = {
3208 .child = child,
3209 .offset = offset,
3210 .prealloc = prealloc,
3211 .errp = errp,
3212 .ret = NOT_DONE,
3215 if (qemu_in_coroutine()) {
3216 /* Fast-path if already in coroutine context */
3217 bdrv_truncate_co_entry(&tco);
3218 } else {
3219 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
3220 qemu_coroutine_enter(co);
3221 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
3224 return tco.ret;
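/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * grow an image without preallocating the new area.  PREALLOC_MODE_OFF is
 * the QAPI-generated PreallocMode value for "no preallocation".
 */
static int example_grow_image(BdrvChild *child, int64_t new_size, Error **errp)
{
    /* requires BLK_PERM_RESIZE on 'child', as asserted by bdrv_co_truncate() */
    return bdrv_truncate(child, new_size, PREALLOC_MODE_OFF, errp);
}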