block: Allow BDRV_REQ_FUA through blk_pwrite()
block/block-backend.c

/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
#include "qemu/id.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}

static const BdrvChildRole child_root = {
    .inherit_options = blk_root_inherit_options,
};

/*
 * Create a new BlockBackend with a reference count of one.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(Error **errp)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->root = bdrv_root_attach_child(bs, "root", &child_root);
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->root->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    blk_set_enable_write_cache(blk, true);

    return blk;
}

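/*
 * Usage sketch (hypothetical caller, not part of the original file): open an
 * image read-write and release the backend again when done.  The image name
 * "test.img" is illustrative only.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     blk = blk_new_open("test.img", NULL, NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ... I/O via blk_pread()/blk_pwrite() ...
 *     blk_unref(blk);
 */
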
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/*
 * Iterates over all BlockDriverStates which are attached to a BlockBackend.
 * This function is for use by bdrv_next().
 *
 * @bs must be NULL or a BDS that is attached to a BB.
 */
BlockDriverState *blk_next_root_bs(BlockDriverState *bs)
{
    BlockBackend *blk;

    if (bs) {
        assert(bs->blk);
        blk = bs->blk;
    } else {
        blk = NULL;
    }

    do {
        blk = blk_all_next(blk);
    } while (blk && !blk->root);

    return blk ? blk->root->bs : NULL;
}

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    assert(blk->root->bs->blk == blk);

    notifier_list_notify(&blk->remove_bs_notifiers, blk);

    blk_update_root_state(blk);

    blk->root->bs->blk = NULL;
    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->root && !bs->blk);
    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root);
    bs->blk = blk;

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

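/*
 * Explanatory note (not in the original file): the bounds check above is
 * phrased as "offset > len || len - offset < size" rather than
 * "offset + size > len" so that no intermediate sum can overflow: offset is
 * known to be <= len before len - offset is formed, and the remaining space
 * is compared against @size directly.  For example, with len = 100,
 * offset = 90, size = 20: 90 <= 100, but 100 - 90 = 10 < 20, so the request
 * is rejected with -EIO.
 */
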
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

static int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                                      unsigned int bytes, QEMUIOVector *qiov,
                                      BdrvRequestFlags flags)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_preadv(blk_bs(blk), offset, bytes, qiov, flags);
}

static int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                       unsigned int bytes, QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    return bdrv_co_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
}

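/*
 * Explanatory note (not in the original file): this is where the guest's
 * write-cache setting meets per-request FUA.  With the write cache disabled
 * (writethrough), every write gets BDRV_REQ_FUA added so it reaches stable
 * storage before completing.  With the cache enabled, only callers that pass
 * BDRV_REQ_FUA explicitly (e.g. through blk_pwrite() below) get that
 * guarantee; everything else relies on a later flush.
 */
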
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
                              rwco->qiov, rwco->flags);
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
                               rwco->qiov, rwco->flags);
}

static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    AioContext *aio_context;
    QEMUIOVector qiov;
    struct iovec iov;
    Coroutine *co;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk = blk,
        .offset = offset,
        .qiov = &qiov,
        .flags = flags,
        .ret = NOT_DONE,
    };

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, &rwco);

    aio_context = blk_get_aio_context(blk);
    while (rwco.ret == NOT_DONE) {
        aio_poll(aio_context, true);
    }

    return rwco.ret;
}

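/*
 * Explanatory note (not in the original file): blk_prw() is the synchronous
 * wrapper around the coroutine-based I/O path.  It wraps @buf in a
 * single-element QEMUIOVector, runs @co_entry (blk_read_entry or
 * blk_write_entry) in a coroutine, and polls the backend's AioContext until
 * the coroutine replaces the NOT_DONE sentinel with a real return value.
 * The synchronous helpers below (blk_pread(), blk_pwrite(), ...) all funnel
 * through here.
 */
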
static int blk_rw(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                  int nb_sectors, CoroutineEntry co_entry,
                  BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return blk_prw(blk, sector_num << BDRV_SECTOR_BITS, buf,
                   nb_sectors << BDRV_SECTOR_BITS, co_entry, flags);
}

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    return blk_rw(blk, sector_num, buf, nb_sectors, blk_read_entry, 0);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    bdrv_no_throttling_begin(bs);
    ret = blk_read(blk, sector_num, buf, nb_sectors);
    bdrv_no_throttling_end(bs);
    return ret;
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    return blk_rw(blk, sector_num, (uint8_t*) buf, nb_sectors,
                  blk_write_entry, 0);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    return blk_rw(blk, sector_num, NULL, nb_sectors, blk_write_entry,
                  flags | BDRV_REQ_ZERO_WRITE);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
    QEMUBH* bh;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->bh) {
        assert(acb->has_returned);
        qemu_bh_delete(acb->bh);
    }
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    blk_aio_complete(opaque);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk = blk,
        .offset = offset,
        .qiov = qiov,
        .flags = flags,
        .ret = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->bh = NULL;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, acb);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh, acb);
        qemu_bh_schedule(acb->bh);
    }

    return &acb->common;
}

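/*
 * Explanatory note (not in the original file): the has_returned/bh dance
 * above keeps the completion callback from firing before blk_aio_prwv() has
 * handed the BlockAIOCB back to its caller.  If the coroutine completes
 * synchronously (ret != NOT_DONE once qemu_coroutine_enter() returns),
 * blk_aio_complete() has already run once but did nothing because
 * has_returned was still false; the completion is then replayed from a
 * bottom half, which only runs after the caller holds the ACB.
 */
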
static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS,
                        nb_sectors << BDRV_SECTOR_BITS, NULL,
                        blk_aio_write_entry, flags | BDRV_REQ_ZERO_WRITE,
                        cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}

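/*
 * Usage sketch (hypothetical caller, not part of the original file): this is
 * the interface change named in the commit subject -- blk_pwrite() now takes
 * BdrvRequestFlags, so BDRV_REQ_FUA can be requested for a single write:
 *
 *     uint8_t buf[512] = { 0 };
 *     int ret;
 *
 *     // Complete only once the data has reached stable storage:
 *     ret = blk_pwrite(blk, 0, buf, sizeof(buf), BDRV_REQ_FUA);
 *
 *     // A plain write followed by a flush gives at least as strong a
 *     // guarantee, at the cost of a second request:
 *     ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
 *     if (ret >= 0) {
 *         ret = blk_flush(blk);
 *     }
 */
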
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    assert(nb_sectors << BDRV_SECTOR_BITS == iov->size);
    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov->size, iov,
                        blk_aio_read_entry, 0, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    assert(nb_sectors << BDRV_SECTOR_BITS == iov->size);
    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov->size, iov,
                        blk_aio_write_entry, 0, cb, opaque);
}

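/*
 * Usage sketch (hypothetical caller, not part of the original file; @buf is
 * an assumed 512-byte buffer): the AIO variants return immediately and
 * report the result through the callback, which runs in the backend's
 * AioContext:
 *
 *     static void my_write_cb(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             error_report("write failed: %s", strerror(-ret));
 *         }
 *     }
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = { .iov_base = buf, .iov_len = 512 };
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     blk_aio_writev(blk, 0, &qiov, 1, my_write_cb, NULL);
 */
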
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk_bs(blk), cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk_bs(blk), sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk_bs(blk), reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk_bs(blk), req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk_bs(blk));
}

void blk_drain(BlockBackend *blk)
{
    if (blk_bs(blk)) {
        bdrv_drain(blk_bs(blk));
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_set_aio_context(bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return blk_co_pwritev(blk, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk_bs(blk), sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk_bs(blk), offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk_bs(blk), sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags = blk->root->bs->open_flags;
    blk->root_state.read_only = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->root->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->root->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}

/*
 * Applies the information in the root state to the given BlockDriverState. This
 * does not include the flags which have to be specified for bdrv_open(), use
 * blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
    }
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}

int blk_flush_all(void)
{
    BlockBackend *blk = NULL;
    int result = 0;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);
        int ret;

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk)) {
            ret = blk_flush(blk);
            if (ret < 0 && !result) {
                result = ret;
            }
        }
        aio_context_release(aio_context);
    }

    return result;
}