/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
        bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
        bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
        bs->bl.max_iov = bs->file->bs->bl.max_iov;
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing->bs->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing->bs->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing->bs->bl.opt_mem_alignment);
        bs->bl.min_mem_alignment =
            MAX(bs->bl.min_mem_alignment,
                bs->backing->bs->bl.min_mem_alignment);
        bs->bl.max_iov =
            MIN(bs->bl.max_iov,
                bs->backing->bs->bl.max_iov);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it again.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
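
/* Hypothetical usage sketch (not part of the original file): because the flag
 * is a reference count, independent users compose safely. Assumes a valid
 * BlockDriverState obtained elsewhere. */
static void __attribute__((unused)) example_copy_on_read_refcount(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);   /* user A enables the feature */
    bdrv_enable_copy_on_read(bs);   /* user B enables it too */
    bdrv_disable_copy_on_read(bs);  /* A is done; still enabled for B */
    bdrv_disable_copy_on_read(bs);  /* B is done; feature is now off */
}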
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
    bool done;
} BdrvCoDrainData;
static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        /* Keep iterating */
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    data->done = true;
    qemu_coroutine_enter(co, NULL);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
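
/* Hypothetical usage sketch (not part of the original file): quiesce a BDS
 * while its state is modified, then resume. Assumes the caller holds the
 * BDS's AioContext, as the comment above requires. */
static void __attribute__((unused)) example_drained_section(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);  /* no new requests; in-flight ones complete */
    /* ... it is now safe to modify bs ... */
    bdrv_drained_end(bs);    /* resume request processing */
}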
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->job) {
            block_job_pause(bs->job);
        }
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        bdrv_drain_recurse(bs);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (busy) {
        busy = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    if (bdrv_requests_pending(bs)) {
                        busy = true;
                        aio_poll(aio_context, busy);
                    }
                }
            }
            busy |= aio_poll(aio_context, false);
            aio_context_release(aio_context);
        }
    }

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        if (bs->job) {
            block_job_resume(bs->job);
        }
        aio_context_release(aio_context);
    }
    g_slist_free(aio_ctxs);
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
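
/* Worked example (illustrative, not part of the original file): with
 * align = 4096, a request covering bytes [5000, 5100) is widened to the
 * serialising range [4096, 8192): 5000 rounds down to 4096 and 5100 rounds
 * up to 8192, so overlap_bytes becomes 4096. */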
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
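
/* Worked example (illustrative, not part of the original file): with a
 * 64 KiB cluster size, c = 65536 / 512 = 128 sectors. A request for sectors
 * [100, 150) is widened to [0, 256): 100 rounds down to 0 and
 * 100 - 0 + 50 = 150 rounds up to 256, i.e. two whole clusters. */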
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
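
/* Hypothetical usage sketch (not part of the original file): persist a small
 * metadata update before acting on it. The offset and field are made up for
 * illustration. */
static int __attribute__((unused)) example_update_header_field(BlockDriverState *bs)
{
    uint32_t field = 0x1;  /* hypothetical on-disk header field */

    /* Write and flush, so later writes cannot be reordered before this one */
    return bdrv_pwrite_sync(bs, 0, &field, sizeof(field));
}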
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                             cluster_nb_sectors * BDRV_SECTOR_SIZE,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
                                  cluster_nb_sectors * BDRV_SECTOR_SIZE,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!bs->zero_beyond_eof) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
    } else {
        /* Read zeros after EOF */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = bdrv_driver_preadv(bs, offset,
                                     max_nb_sectors * BDRV_SECTOR_SIZE,
                                     &local_qiov, 0);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* Don't do copy-on-read if we read data before write operation */
    if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
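
/* Worked example (illustrative, not part of the original file): with
 * align = 512, a read of bytes [700, 800) grows to the aligned request
 * [512, 1024): 188 padding bytes before the caller's 100 bytes and 224
 * after, so the driver only ever sees an aligned request. */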
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
                                        BDRV_REQUEST_MAX_SECTORS);

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num,
                                            flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
                                            MAX_WRITE_ZEROES_BOUNCE_BUFFER);
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_xfer_len);
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, sector_num * BDRV_SECTOR_SIZE,
                                      num * BDRV_SECTOR_SIZE, &qiov,
                                      write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_xfer_len) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}
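
/* Worked example (illustrative, not part of the original file): with
 * write_zeroes_alignment = 8 sectors, zeroing sectors [5, 25) is issued as
 * three driver calls: [5, 8) (unaligned head), [8, 24) (aligned bulk) and
 * [24, 25) (unaligned tail). */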
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_offset < offset + bytes) {
        bs->wr_highest_offset = offset + bytes;
    }

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = align - ((offset + bytes) & (align - 1));

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base = buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
                                   &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(bs, req, offset, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}
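
/* Worked example (illustrative, not part of the original file): with
 * align = 512, a zero write of bytes [200, 1100) is carried out as an RMW
 * of [0, 512) that zeroes bytes 200..511, an aligned zero write of
 * [512, 1024), and an RMW of [1024, 1536) that zeroes bytes 1024..1099. */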
/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base = head_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base = tail_buf,
            .iov_len = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    return ret;
}
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * If the returned value is positive and the BDRV_BLOCK_OFFSET_VALID bit is
 * set, 'file' points to the BDS which the sector range is allocated in.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    *file = NULL;
    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                            file);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum, file);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    } else {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing) {
            BlockDriverState *bs2 = bs->backing->bs;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (*file && *file != bs &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        BlockDriverState *file2;
        int file_pnum;

        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum, &file2);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;

    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status_above() */
static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;

    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
                                               data->sector_num,
                                               data->nb_sectors,
                                               data->pnum,
                                               data->file);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status_above().
 *
 * See bdrv_co_get_block_status_above() for details.
 */
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .base = base,
        .file = file,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_above_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}

int64_t bdrv_get_block_status(BlockDriverState *bs,
                              int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file)
{
    return bdrv_get_block_status_above(bs, backing_bs(bs),
                                       sector_num, nb_sectors, pnum, file);
}
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    BlockDriverState *file;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
                                        &file);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
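
/* Hypothetical usage sketch (not part of the original file): walk an image
 * and count sectors allocated in the top layer, stepping by the pnum runs
 * that bdrv_is_allocated() reports. */
static int64_t __attribute__((unused)) example_count_allocated(BlockDriverState *bs)
{
    int64_t total = bdrv_nb_sectors(bs);
    int64_t sector_num = 0, allocated = 0;

    if (total < 0) {
        return total;  /* propagate errors from querying the size */
    }
    while (sector_num < total) {
        int pnum;
        int nb = MIN(total - sector_num, BDRV_REQUEST_MAX_SECTORS);
        int ret = bdrv_is_allocated(bs, sector_num, nb, &pnum);
        if (ret < 0) {
            return ret;          /* propagate I/O errors */
        }
        if (ret) {
            allocated += pnum;   /* this run is allocated */
        }
        sector_num += pnum;      /* the next run starts right after */
    }
    return allocated;
}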
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nb_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = backing_bs(intermediate);
    }

    *pnum = n;
    return 0;
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!drv->bdrv_write_compressed) {
        return -ENOTSUP;
    }
    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_load_vmstate) {
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    }
    if (bs->file) {
        return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
    }
    return -ENOTSUP;
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockRequest {
    union {
        /* Used during read, write, trim */
        struct {
            int64_t sector;
            int nb_sectors;
            int flags;
            QEMUIOVector *qiov;
        };
        /* Used during ioctl */
        struct {
            int req;
            void *buf;
        };
    };
    BlockCompletionFunc *cb;
    void *opaque;

    int error;
} BlockRequest;

typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool need_bh;
    bool *done;
    QEMUBH *bh;
} BlockAIOCBCoroutine;
static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
    if (!acb->need_bh) {
        acb->common.cb(acb->common.opaque, acb->req.error);
        qemu_aio_unref(acb);
    }
}

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    assert(!acb->need_bh);
    qemu_bh_delete(acb->bh);
    bdrv_co_complete(acb);
}

static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
    acb->need_bh = false;
    if (acb->req.error != -EINPROGRESS) {
        BlockDriverState *bs = acb->common.bs;

        acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
        qemu_bh_schedule(acb->bh);
    }
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
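
/* Note (illustrative, not part of the original file): the need_bh handshake
 * above guarantees the completion callback never fires before the ACB has
 * been returned to the caller.  If the coroutine finishes synchronously,
 * acb->req.error is already set when bdrv_co_maybe_schedule_bh() runs, so
 * completion is deferred to a bottom half instead of being invoked inline. */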
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_malloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}
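
/* Note (illustrative, not part of the original file): an ACB starts with a
 * single reference from qemu_aio_get(); the emulation code drops it with
 * qemu_aio_unref() once the completion callback has run.  bdrv_aio_cancel()
 * above shows the pattern of taking an extra reference to keep the ACB
 * alive while polling for completion. */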
2099 /**************************************************************/
2100 /* Coroutine block device emulation */
2102 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2104 RwCo *rwco = opaque;
2106 rwco->ret = bdrv_co_flush(rwco->bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;
    BdrvTrackedRequest req;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
        bdrv_is_sg(bs)) {
        return 0;
    }

    tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);

    /* Write back all layers by calling one driver function */
    if (bs->drv->bdrv_co_flush) {
        ret = bs->drv->bdrv_co_flush(bs);
        goto out;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            goto out;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        goto out;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
    tracked_request_end(&req);
    return ret;
}

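/* Synchronous wrapper around bdrv_co_flush(): run the coroutine entry
 * directly when already in coroutine context, otherwise spawn it and poll
 * the node's AioContext until rwco.ret signals completion. */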
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

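/* Discard (unmap) a sector range.  The request is split into chunks that
 * respect the driver's discard_alignment and max_discard limits; a driver
 * returning -ENOTSUP for a chunk is tolerated, since discard is only
 * advisory. */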
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    BdrvTrackedRequest req;
    int max_discard, ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_request(bs, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    } else if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors,
                          BDRV_TRACKED_DISCARD);
    bdrv_set_dirty(bs, sector_num, nb_sectors);

    max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
    while (nb_sectors > 0) {
        /* Reuse the outer 'ret' here: a shadowing inner declaration would
         * lose the error code at the 'out' label below. */
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            /* Pass the clamped chunk size (num), not the full remaining
             * nb_sectors, so the AIO path honours the same alignment and
             * max_discard limits as the coroutine path. */
            acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                ret = -EIO;
                goto out;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            goto out;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    ret = 0;
out:
    tracked_request_end(&req);
    return ret;
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}

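/* Issue an ioctl through the driver's bdrv_aio_ioctl callback and yield
 * until bdrv_co_io_em_complete() wakes the coroutine up again.  The
 * request is tracked like any other I/O request. */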
static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest tracked_req;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
    if (!drv || !drv->bdrv_aio_ioctl) {
        co.ret = -ENOTSUP;
        goto out;
    }

    acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
    if (!acb) {
        co.ret = -ENOTSUP;
        goto out;
    }
    qemu_coroutine_yield();
out:
    tracked_request_end(&tracked_req);
    return co.ret;
}

typedef struct {
    BlockDriverState *bs;
    int req;
    void *buf;
    int ret;
} BdrvIoctlCoData;

static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
{
    BdrvIoctlCoData *data = opaque;
    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
}

/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BdrvIoctlCoData data = {
        .bs = bs,
        .req = req,
        .buf = buf,
        .ret = -EINPROGRESS,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_co_ioctl_entry(&data);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);

        qemu_coroutine_enter(co, &data);
        while (data.ret == -EINPROGRESS) {
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}

static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
                                      acb->req.req, acb->req.buf);
    bdrv_co_complete(acb);
}

BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
                           unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
                                            bs, cb, opaque);
    Coroutine *co;

    acb->need_bh = true;
    acb->req.error = -EINPROGRESS;
    acb->req.req = req;
    acb->req.buf = buf;
    co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
    qemu_coroutine_enter(co, acb);

    bdrv_co_maybe_schedule_bh(acb);
    return &acb->common;
}

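/* Buffer allocation helpers: allocate memory that satisfies the node's
 * optimal alignment, e.g. for O_DIRECT I/O.  The plain variants abort on
 * allocation failure, the _try_ variants return NULL instead, and the
 * ...0 variants additionally zero the buffer. */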
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}

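/* I/O plugging: batch up requests submitted between bdrv_io_plug() and
 * bdrv_io_unplug() so drivers can submit them to the host in one go.
 * Both calls recurse into all children and may nest; only the outermost
 * plug/unplug pair reaches the driver, and only while plugging is not
 * disabled by an unplugged section (see below). */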
void bdrv_io_plug(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplug(child->bs);
    }
}

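/* Temporarily disable plugging on a subtree: bdrv_io_unplugged_begin()
 * forces any currently plugged node to unplug and blocks further
 * plugging until the matching bdrv_io_unplugged_end() restores it.
 * These sections nest via the io_plug_disabled counter. */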
void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}