/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);

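/* Notify all parents of @bs that a drained section is starting, so that they
 * stop submitting new requests to this node. */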
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

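/* Fold the limits of a child node into @dst so that @dst satisfies the
 * constraints of both nodes. */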
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

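/* Context passed from bdrv_co_yield_to_drain() to the bottom half that does
 * the draining on the coroutine's behalf. */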
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
    };
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    BlockJob *job = NULL;
    GSList *aio_ctxs = NULL, *ctx;

    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);

    job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

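/* Mark @req as serialising and widen its overlap range to @align boundaries,
 * so that conflicting requests are detected at that granularity. */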
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

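/* Return the cluster size of @bs, or the request alignment if the driver does
 * not report a cluster size. */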
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

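/* Wait for all tracked requests that overlap with @self and must be
 * serialised against it to complete.  Returns true if we actually waited. */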
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}

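/* Basic sanity checks for a byte-based request: reject oversized requests,
 * negative offsets, and requests against a node with no medium inserted. */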
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(child->bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        qemu_coroutine_enter(co);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = count,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine);
}

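/* Hand a read request to the driver, using the preferred byte-based interface
 * if available and falling back to the sector-based or AIO callbacks. */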
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

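/* Hand a write request to the driver, using the best available interface and
 * emulating BDRV_REQ_FUA with an explicit flush when the driver does not
 * support it natively. */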
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}

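/* Copy-on-read implementation: read the whole cluster through a bounce
 * buffer, write it back into the image (or write zeroes if the cluster is
 * all zero), then copy the requested part into @qiov. */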
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}

/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + count) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (count > 0 && !ret) {
        int num = count;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. */
            num = MIN(count, alignment - head);
            head = 0;
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector. */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        count -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    ++bs->write_gen;
    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}

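/* Zero-write path for requests without a qiov: performs read-modify-write of
 * the unaligned head and tail and writes the aligned middle as zeroes. */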
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int count, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, count, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend
 * or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
 * points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum, file);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}

static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
                                   &data);
        qemu_coroutine_enter(co);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}

/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}

typedef struct BdrvVmstateCo {
    BlockDriverState   *bs;
    QEMUIOVector       *qiov;
    int64_t             pos;
    bool                is_read;
    int                 ret;
} BdrvVmstateCo;

static int coroutine_fn
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                   bool is_read)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_load_vmstate) {
        return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
                       : drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
    }

    return -ENOTSUP;
}

static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
{
    BdrvVmstateCo *co = opaque;
    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
}

static inline int
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                bool is_read)
{
    if (qemu_in_coroutine()) {
        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
    } else {
        BdrvVmstateCo data = {
            .bs         = bs,
            .qiov       = qiov,
            .pos        = pos,
            .is_read    = is_read,
            .ret        = -EINPROGRESS,
        };
        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
        return data.ret;
    }
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_writev_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, false);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = buf,
        .iov_len    = size,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_readv_vmstate(bs, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return size;
}

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    return bdrv_rw_vmstate(bs, qiov, pos, true);
}

/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);

    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
                                  0, cb, opaque, true);
}

void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t offset;
            int bytes;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BdrvChild *child;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};

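/* Invoke the completion callback for an emulated AIO request, unless the
 * request finished before its submitter returned; in that case
 * bdrv_co_maybe_schedule_bh() defers the callback to a bottom half. */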
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    }
}

/* Invoke bdrv_co_preadv/bdrv_co_pwritev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}

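/* Common implementation of bdrv_aio_readv/writev: allocate a coroutine
 * AIOCB, run the request in a coroutine via bdrv_co_do_rw(), and return the
 * AIOCB immediately.  The completion callback fires either directly from the
 * coroutine or from a bottom half, see bdrv_co_maybe_schedule_bh(). */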
static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
                                          int64_t offset,
                                          QEMUIOVector *qiov,
                                          BdrvRequestFlags flags,
                                          BlockCompletionFunc *cb,
                                          void *opaque,
                                          bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
    acb->child = child;
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_pdiscard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_pdiscard(bs, acb->req.offset, acb->req.bytes);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_pdiscard(BlockDriverState *bs, int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_pdiscard(bs, offset, count, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.offset = offset;
    acb->req.bytes = count;
    co = qemu_coroutine_create(bdrv_aio_pdiscard_co_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

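/* Allocate a driver-specific AIOCB of aiocb_info->aiocb_size bytes and
 * initialise the common fields.  The AIOCB starts with a reference count of
 * one; qemu_aio_ref()/qemu_aio_unref() adjust it, and the structure is freed
 * when the count drops to zero. */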
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}

/**************************************************************/
/* Coroutine block device emulation */

typedef struct FlushCo {
    BlockDriverState *bs;
    int ret;
} FlushCo;

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    FlushCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

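/* Flush pending writes for @bs to stable storage.
 *
 * If the driver provides .bdrv_co_flush, that single callback writes back all
 * layers.  Otherwise data is first flushed to the host OS
 * (.bdrv_co_flush_to_os) and then, unless BDRV_O_NO_FLUSH is set or nothing
 * has been written since the last flush (tracked via bs->write_gen and
 * bs->flushed_gen), forced to disk with .bdrv_co_flush_to_disk or the AIO
 * variant.  Finally the underlying protocol node (bs->file) is flushed as
 * well.  Concurrent flushes on the same node are serialised on
 * bs->flush_queue. */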
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);

    int current_gen = bs->write_gen;

    /* Wait until any previous flushes are completed */
    while (bs->active_flush_req != NULL) {
        qemu_co_queue_wait(&bs->flush_queue);
    }

    bs->active_flush_req = &req;

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    /* Check if we really need to flush anything */
    if (bs->flushed_gen == current_gen) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush.  Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk.  Returning an error doesn't work
         * because that would break guests even if the server operates in
         * writethrough mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    /* Notify any pending flushes that we have completed */
    bs->flushed_gen = current_gen;
    bs->active_flush_req = NULL;
    /* Return value is ignored - it's ok if wait queue is empty */
    qemu_co_queue_next(&bs->flush_queue);

    tracked_request_end(&req);
    return ret;
}

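/* Synchronous wrapper around bdrv_co_flush(): runs it directly when already
 * in coroutine context, otherwise spawns a coroutine and polls the node's
 * AioContext until the flush has completed. */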
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    FlushCo flush_co = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&flush_co);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
        qemu_coroutine_enter(co);
        while (flush_co.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return flush_co.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t offset;
    int count;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
}

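/* Discard (unmap) @count bytes starting at @offset.
 *
 * Discard is advisory: the request is silently ignored when BDRV_O_UNMAP is
 * not set or the driver implements neither .bdrv_co_pdiscard nor
 * .bdrv_aio_pdiscard, and any unaligned head or tail of the range is simply
 * dropped.  The aligned remainder is issued in chunks of at most
 * bs->bl.max_pdiscard bytes; a driver returning -ENOTSUP is treated as
 * success. */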
int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int count)
{
    BdrvTrackedRequest req;
    int max_pdiscard, ret;
    int head, align;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, count);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
        return 0;
    }

    /* Discard is advisory, so ignore any unaligned head or tail */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    if (head) {
        head = MIN(count, align - head);
        count -= head;
        offset += head;
    }
    count = QEMU_ALIGN_DOWN(count, align);
    if (!count) {
        return 0;
    }

    tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
                                   align);
    assert(max_pdiscard);

    while (count > 0) {
        int num = MIN(count, max_pdiscard);

        if (bs->drv->bdrv_co_pdiscard) {
            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
                                             bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        offset += num;
        count -= num;
    }
    ret = 0;
out:
    ++bs->write_gen;
    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
                   req.bytes >> BDRV_SECTOR_BITS);
    tracked_request_end(&req);
    return ret;
}

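/* Synchronous wrapper around bdrv_co_pdiscard(); see bdrv_flush() for the
 * coroutine-vs-polling pattern used here. */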
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .offset = offset,
        .count = count,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_pdiscard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
        qemu_coroutine_enter(co);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

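/* Coroutine-friendly wrapper around the driver's .bdrv_aio_ioctl callback.
 * Returns -ENOTSUP if the driver does not implement ioctls or fails to
 * create an AIOCB; the request is tracked as BDRV_TRACKED_IOCTL for its
 * duration. */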
static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        co.ret = -ENOTSUP;
        goto out;
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}

typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;
    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}

/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry, &data);

        qemu_coroutine_enter(co);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}

static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry, acb);
    qemu_coroutine_enter(co);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

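/* Buffer allocation helpers.  qemu_blockalign() returns memory aligned to
 * the node's optimal memory alignment (bdrv_opt_mem_align()) and aborts on
 * allocation failure; the *0 variants additionally zero the buffer, and the
 * try_* variants return NULL instead of aborting.  Buffers are released with
 * qemu_vfree().  Illustrative (hypothetical) caller:
 *
 *     uint8_t *buf = qemu_blockalign(bs, len);
 *     ...
 *     qemu_vfree(buf);
 */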
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

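/* I/O batching hints.  While a node is plugged, drivers that implement
 * .bdrv_io_plug may queue submissions and issue them in one go when the node
 * is unplugged again.  The calls recurse through all children and are
 * counted, so nested plug/unplug pairs behave as expected;
 * bdrv_io_unplugged_begin()/end() temporarily force the whole subtree into
 * the unplugged state. */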
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}