/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/option.h"
#include "migration/misc.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;
struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    AioContext *ctx;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    DeviceState *dev;           /* attached device model, if any */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_aio_context_change;
    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter;
    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests.  BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through
 * by blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                     GSList **ignore, Error **errp);
static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                 GSList **ignore);
static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return dev_id;
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}
static void blk_vm_state_changed(void *opaque, int running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}
void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest.  For block job BBs that satisfy this, we can just allow
     * it.  This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}
static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}
static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_poll       = blk_root_drained_poll,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,

    .attach             = blk_root_attach,
    .detach             = blk_root_detach,

    .can_set_aio_ctx    = blk_root_can_set_aio_ctx,
    .set_aio_ctx        = blk_root_set_aio_ctx,
};
/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->ctx = ctx;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;

    block_acct_init(&blk->stats);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
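/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * consistent reads and write access, while letting other users of the
 * attached node do anything, could create a backend like this; blk_unref()
 * later drops the single reference that blk_new() returned.
 *
 *     BlockBackend *blk;
 *
 *     blk = blk_new(qemu_get_aio_context(),
 *                   BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                   BLK_PERM_ALL);
 *     ...
 *     blk_unref(blk);
 */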
/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 * The new BlockBackend is in the main AioContext.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;

    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }

    blk = blk_new(qemu_get_aio_context(), perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (blk->root == NULL) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
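/*
 * Illustrative sketch (assumed usage, in the style of the tools this
 * function mainly serves): open an image read/write through a fresh
 * backend; "test.qcow2" is a hypothetical file name.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     blk = blk_new_open("test.qcow2", NULL, NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */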
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo);
}
int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    assert(blk->refcnt > 0);
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (blk->refcnt > 1) {
            blk->refcnt--;
        } else {
            blk_drain(blk);
            /* blk_drain() cannot resurrect blk, nobody held a reference */
            assert(blk->refcnt == 1);
            blk->refcnt = 0;
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}
void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}

static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}
/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}
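/*
 * Illustrative sketch (not from the original file): the expected iteration
 * pattern over all top-level nodes. bdrv_next() takes references on the
 * objects it returns, so an early exit must go through bdrv_next_cleanup();
 * want_to_stop() is a hypothetical predicate.
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (want_to_stop(bs)) {
 *             bdrv_next_cleanup(&it);
 *             break;
 *         }
 *     }
 */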
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}
/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}
/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BlockDriverState *bs;

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        bs = blk_bs(blk);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
    }

    blk_update_root_state(blk);

    /* bdrv_root_unref_child() will cause blk->root to become stale and may
     * switch to a completion coroutine later on. Let's drain all I/O here
     * to avoid that and a potential QEMU crash.
     */
    blk_drain(blk);
    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}
/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
                                       blk->perm, blk->shared_perm, blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}
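/*
 * Illustrative sketch (assumed usage): blk_remove_bs() and blk_insert_bs()
 * together implement a medium change for a backend; new_bs is a
 * hypothetical replacement node.
 *
 *     if (blk_bs(blk)) {
 *         blk_remove_bs(blk);
 *     }
 *     if (blk_insert_bs(blk, new_bs, errp) < 0) {
 *         ... the permissions could not be obtained on new_bs ...
 *     }
 */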
/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}
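/*
 * Illustrative sketch (not from the original file): re-applying the
 * currently cached bitmasks, as blk_root_activate() above effectively
 * does, is simply a get followed by a set:
 *
 *     uint64_t perm, shared;
 *
 *     blk_get_perm(blk, &perm, &shared);
 *     if (blk_set_perm(blk, perm, shared, errp) < 0) {
 *         ... another user holds a conflicting permission ...
 *     }
 */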
/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
DeviceState *blk_get_attached_dev(BlockBackend *blk)
{
    return blk->dev;
}
/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }

    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
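/*
 * Illustrative sketch (hypothetical device-model code, not part of this
 * file): a qdev device typically attaches itself and then registers its
 * callbacks; my_dev_ops and my_resize_cb are assumed names.
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .resize_cb = my_resize_cb,
 *     };
 *
 *     if (blk_attach_dev(blk, DEVICE(dev)) < 0) {
 *         ... another device model is attached already (-EBUSY) ...
 *     }
 *     blk_set_dev_ops(blk, &my_dev_ops, dev);
 */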
/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            /* Errors here are only possible on load */
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}
/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
{
    blk->allow_aio_context_change = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
                              qiov, rwco->flags);
    aio_wait_kick();
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
    aio_wait_kick();
}

static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
    BlkRwCo rwco = {
        .blk    = blk,
        .offset = offset,
        .iobuf  = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}
int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int bytes, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

void blk_inc_in_flight(BlockBackend *blk)
{
    atomic_inc(&blk->in_flight);
}

void blk_dec_in_flight(BlockBackend *blk)
{
    atomic_dec(&blk->in_flight);
    aio_wait_kick();
}
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size         = sizeof(BlkAioEmAIOCB),
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        blk_dec_in_flight(acb->rwco.blk);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }

    return &acb->common;
}
static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
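/*
 * Illustrative sketch (assumed usage): the synchronous wrappers return the
 * byte count on success and a negative errno on failure, so a
 * read-modify-write of a 512-byte sector could look like this; sector_buf
 * is a hypothetical buffer.
 *
 *     uint8_t sector_buf[512];
 *
 *     if (blk_pread(blk, 0, sector_buf, sizeof(sector_buf)) < 0) {
 *         ... read failed ...
 *     }
 *     sector_buf[0] ^= 0xff;
 *     if (blk_pwrite(blk, 0, sector_buf, sizeof(sector_buf), 0) < 0) {
 *         ... write failed ...
 *     }
 */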
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
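/*
 * Illustrative sketch (not from the original file): asynchronous submission
 * pairs a QEMUIOVector with a completion callback; my_read_done and
 * my_state are assumed names.
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         ... ret is 0 or a negative errno; runs in blk's AioContext ...
 *     }
 *
 *     QEMUIOVector qiov;
 *
 *     qemu_iovec_init_buf(&qiov, buf, len);
 *     blk_aio_preadv(blk, offset, &qiov, 0, my_read_done, my_state);
 */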
static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}
int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             qiov->iov[0].iov_base);
    aio_wait_kick();
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk->root, offset, bytes);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
    aio_wait_kick();
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}
void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(blk_get_aio_context(blk),
                   atomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error));
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
bool blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

bool blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_is_sg(bs);
}

bool blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}
bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag);
    g_free(id);
}
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
uint32_t blk_get_request_alignment(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
        assert(ctx == blk->ctx);
    }

    return blk->ctx;
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}
static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
                                  bool update_root_node, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    int ret;

    if (bs) {
        if (update_root_node) {
            ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
                                                 errp);
            if (ret < 0) {
                return ret;
            }
        }
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }
    }

    blk->ctx = new_context;
    return 0;
}

int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
                        Error **errp)
{
    return blk_do_set_aio_context(blk, new_context, true, errp);
}
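/*
 * Illustrative sketch (assumed usage, as a dataplane-style device might do):
 * move the backend and its whole node tree to an IOThread's context;
 * my_iothread is a hypothetical IOThread.
 *
 *     AioContext *ctx = iothread_get_aio_context(my_iothread);
 *
 *     if (blk_set_aio_context(blk, ctx, errp) < 0) {
 *         ... some parent of the tree refused the move ...
 *     }
 */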
static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                     GSList **ignore, Error **errp)
{
    BlockBackend *blk = child->opaque;

    if (blk->allow_aio_context_change) {
        return true;
    }

    /* Only manually created BlockBackends that are not attached to anything
     * can change their AioContext without updating their user. */
    if (!blk->name || blk->dev) {
        /* TODO Add BB name/QOM path */
        error_setg(errp, "Cannot change iothread of active block backend");
        return false;
    }

    return true;
}

static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                 GSList **ignore)
{
    BlockBackend *blk = child->opaque;
    blk_do_set_aio_context(blk, ctx, false, &error_abort);
}
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    notifier = g_new(BlockBackendAioNotifier, 1);
    notifier->attached_aio_context = attached_aio_context;
    notifier->detach_aio_context = detach_aio_context;
    notifier->opaque = opaque;
    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}
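/*
 * Illustrative sketch (hypothetical caller, not part of this file): code
 * that caches the backend's AioContext can keep it current with a notifier
 * pair; my_attach, my_detach and my_state are assumed names.
 *
 *     static void my_attach(AioContext *new_context, void *opaque) { ... }
 *     static void my_detach(void *opaque) { ... }
 *
 *     blk_add_aio_context_notifier(blk, my_attach, my_detach, my_state);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attach, my_detach, my_state);
 */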
void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}
void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
                 Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset, prealloc, errp);
}
static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
    aio_wait_kick();
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}

/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}
void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    if (bs) {
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
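/*
 * Illustrative sketch (assumed usage, mirroring what blockdev.c does): put
 * a backend into a named throttle group and apply limits; the group name
 * and values here are made up for the example.
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */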
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
        throttle_group_restart_tgm(&blk->public.throttle_group_member);
    }
}

static bool blk_root_drained_poll(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);
    return !!blk->in_flight;
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
    }
}
void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
    bdrv_register_buf(blk_bs(blk), host, size);
}

void blk_unregister_buf(BlockBackend *blk, void *host)
{
    bdrv_unregister_buf(blk_bs(blk), host);
}

int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags)
{
    int r;

    r = blk_check_byte_request(blk_in, off_in, bytes);
    if (r) {
        return r;
    }
    r = blk_check_byte_request(blk_out, off_out, bytes);
    if (r) {
        return r;
    }

    return bdrv_co_copy_range(blk_in->root, off_in,
                              blk_out->root, off_out,
                              bytes, read_flags, write_flags);
}

const BdrvChild *blk_root(BlockBackend *blk)
{
    return blk->root;
}