/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/id.h"
#include "qemu/option.h"
#include "trace.h"
#include "migration/misc.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    void *dev;                  /* attached device model, if any */
    bool legacy_dev;            /* true if dev is not a DeviceState */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;

    int quiesce_counter;
    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests. BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
    AioWait wait;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);
static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name[0]) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return g_strdup(dev_id);
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static void blk_vm_state_changed(void *opaque, int running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}
void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest. For block job BBs that satisfy this, we can just allow
     * it. This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}
static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,
};
/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    block_acct_init(&blk->stats);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
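
/*
 * Usage sketch (hypothetical caller, not taken from an actual QEMU user):
 * a backend that needs consistent reads and writes, while still letting
 * other users attach read-only, might be created as
 *
 *     BlockBackend *blk = blk_new(BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                                 BLK_PERM_ALL & ~BLK_PERM_WRITE);
 *
 * The permissions only take effect against a node once one is attached,
 * e.g. with blk_insert_bs() or blk_new_open().
 */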
/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;

    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }

    blk = blk_new(perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (!blk->root) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
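
/*
 * Usage sketch (hypothetical; the filename is made up), mirroring how
 * qemu-img-style tools open an image read-write:
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */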
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}
/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}
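
/*
 * Reference counting sketch (hypothetical): every blk_ref() must be paired
 * with a blk_unref(), and the final blk_unref() triggers blk_delete():
 *
 *     blk_ref(blk);        // refcnt: n -> n + 1
 *     ...
 *     blk_unref(blk);      // refcnt: n + 1 -> n
 *     blk_unref(blk);      // if this drops refcnt to 0, blk is destroyed
 */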
/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}
static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}
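
/*
 * Iteration sketch (hypothetical caller): visiting every top-level BDS from
 * the main loop looks like
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ...
 *     }
 *
 * If the loop is left before bdrv_next() returns NULL, bdrv_next_cleanup()
 * must be called to drop the references the iterator still holds (below).
 */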
/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}
/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}
/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}
/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BlockDriverState *bs;

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        bs = blk_bs(blk);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
    }

    blk_update_root_state(blk);

    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}
/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       blk->perm, blk->shared_perm, blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }
    bdrv_ref(bs);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}
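
/*
 * Attach/detach sketch (hypothetical): pairing blk_insert_bs() with
 * blk_remove_bs() around an existing node:
 *
 *     if (blk_insert_bs(blk, bs, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 *     ...
 *     blk_remove_bs(blk);
 *
 * blk_insert_bs() takes its own reference to @bs, so the caller keeps
 * (and eventually drops) its own.
 */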
/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}
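
/*
 * Permission update sketch (hypothetical): tightening permissions on an
 * attached node, e.g. no longer sharing writes:
 *
 *     uint64_t perm, shared;
 *
 *     blk_get_perm(blk, &perm, &shared);
 *     if (blk_set_perm(blk, perm, shared & ~BLK_PERM_WRITE, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */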
static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk->legacy_dev = false;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    return blk_do_attach_dev(blk, dev);
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
    if (blk_do_attach_dev(blk, dev) < 0) {
        abort();
    }
    blk->legacy_dev = true;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}
/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev;

    assert(!blk->legacy_dev);
    dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }
    return object_get_canonical_path(OBJECT(dev));
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
     * it that way, so we can assume blk->dev, if present, is a DeviceState if
     * blk->dev_ops is set. Non-device users may use dev_ops without device. */
    assert(!blk->legacy_dev);

    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
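
/*
 * Device model sketch (hypothetical; the callback names on the right are
 * made up, but the BlockDevOps members are the ones used in this file):
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb = my_dev_change_media_cb,
 *         .drained_begin   = my_dev_drained_begin,
 *         .drained_end     = my_dev_drained_end,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_block_ops, my_dev_state);
 */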
/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        assert(!blk->legacy_dev);

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            /* Errors can only occur on load */
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }

        tray_is_open = blk_dev_is_tray_open(blk);
        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
                                              &error_abort);
            g_free(id);
        }
    }
}
static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}
/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}
void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
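
/*
 * Coroutine I/O sketch (hypothetical coroutine_fn caller): reading 512
 * bytes at offset 0 through the vectored interface:
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = { .iov_base = buf, .iov_len = 512 };
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     ret = blk_co_preadv(blk, 0, qiov.size, &qiov, 0);
 *
 * Note that writes pick up BDRV_REQ_FUA automatically when the write cache
 * is disabled, as implemented above.
 */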
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
                              qiov, rwco->flags);
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
}
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}
int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int bytes, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

static void blk_inc_in_flight(BlockBackend *blk)
{
    atomic_inc(&blk->in_flight);
}

static void blk_dec_in_flight(BlockBackend *blk)
{
    atomic_dec(&blk->in_flight);
    aio_wait_kick(&blk->wait);
}
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size         = sizeof(BlkAioEmAIOCB),
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        blk_dec_in_flight(acb->rwco.blk);
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }

    return &acb->common;
}
static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
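
/*
 * Synchronous I/O sketch (hypothetical): blk_pread()/blk_pwrite() return
 * the byte count on success and a negative errno on failure:
 *
 *     uint8_t buf[512];
 *
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         ... handle error ...
 *     }
 *     if (blk_pwrite(blk, 0, buf, sizeof(buf), 0) < 0) {
 *         ... handle error ...
 *     }
 */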
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
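
/*
 * AIO sketch (hypothetical completion callback): the callback runs in the
 * BlockBackend's AioContext once the request finishes:
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             ... handle error ...
 *         }
 *     }
 *
 *     blk_aio_preadv(blk, 0, &qiov, 0, my_read_cb, my_state);
 */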
static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}
int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             qiov->iov[0].iov_base);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk_bs(blk), offset, bytes);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}
void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(&blk->wait,
            blk_get_aio_context(blk),
            atomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(&blk->wait, ctx,
                atomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
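
/*
 * Policy mapping sketch: with BLOCKDEV_ON_ERROR_ENOSPC, for example, only
 * ENOSPC stops the VM; every other error is reported:
 *
 *     blk_get_error_action(blk, false, ENOSPC) == BLOCK_ERROR_ACTION_STOP
 *     blk_get_error_action(blk, false, EIO)    == BLOCK_ERROR_ACTION_REPORT
 */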
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}
bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    /* blk_eject is only called by qdevified devices */
    assert(!blk->legacy_dev);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag, &error_abort);
    g_free(id);
}
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    if (bs) {
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }
        bdrv_set_aio_context(bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
                 Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset, prealloc, errp);
}

static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}

/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}
void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    if (bs) {
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}
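
/*
 * Throttling sketch (hypothetical group name): enable limits first, then
 * configure them:
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *     blk_io_limits_enable(blk, "my-throttle-group");
 *     blk_set_io_limits(blk, &cfg);
 */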
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
        throttle_group_restart_tgm(&blk->public.throttle_group_member);
    }
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
    }
}
void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
    bdrv_register_buf(blk_bs(blk), host, size);
}

void blk_unregister_buf(BlockBackend *blk, void *host)
{
    bdrv_unregister_buf(blk_bs(blk), host);
}