/*
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "hw/qdev-core.h"
#include "sysemu/blockdev.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/replay.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "migration/misc.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    AioContext *ctx;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    DeviceState *dev;           /* attached device model, if any */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_aio_context_change;
    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter;
    CoQueue queued_requests;
    bool disable_request_queuing;

    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests. BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
                                     int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                     GSList **ignore, Error **errp);
static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                 GSList **ignore);

static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return g_strdup_printf("block device '%s'", dev_id);
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static void blk_vm_state_changed(void *opaque, bool running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}

void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest. For block job BBs that satisfy this, we can just allow
     * it. This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}
static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static AioContext *blk_root_get_parent_aio_context(BdrvChild *c)
{
    BlockBackend *blk = c->opaque;

    return blk_get_aio_context(blk);
}

static const BdrvChildClass child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_poll       = blk_root_drained_poll,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,

    .attach             = blk_root_attach,
    .detach             = blk_root_detach,

    .can_set_aio_ctx    = blk_root_can_set_aio_ctx,
    .set_aio_ctx        = blk_root_set_aio_ctx,

    .get_parent_aio_context = blk_root_get_parent_aio_context,
};
/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->ctx = ctx;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;

    block_acct_init(&blk->stats);

    qemu_co_queue_init(&blk->queued_requests);
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
/*
 * Create a new BlockBackend connected to an existing BlockDriverState.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the
 * permissions to request for @bs that is attached to this
 * BlockBackend. @shared_perm is a bitmask which describes which
 * permissions may be granted to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
                              uint64_t shared_perm, Error **errp)
{
    BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);

    if (blk_insert_bs(blk, bs, errp) < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 * The new BlockBackend is in the main AioContext.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;
    uint64_t shared = BLK_PERM_ALL;

    /*
     * blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a major concern because the BDS stays private
     * and the file is generally not supposed to be used by a second process,
     * so we just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share.
     */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }
    if (flags & BDRV_O_NO_SHARE) {
        shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
    }

    blk = blk_new(qemu_get_aio_context(), perm, shared);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                       perm, shared, blk, errp);
    if (!blk->root) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
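
/*
 * Usage sketch (illustrative, not part of the original file): qemu-img-style
 * tools typically open an image read/write in one call and drop it again
 * with blk_unref(); "image.qcow2" and local_err are hypothetical:
 *
 *     BlockBackend *blk = blk_new_open("image.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */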
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    assert(blk->refcnt > 0);
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (blk->refcnt > 1) {
            blk->refcnt--;
        } else {
            blk_drain(blk);
            /* blk_drain() cannot resurrect blk, nobody held a reference */
            assert(blk->refcnt == 1);
            blk->refcnt = 0;
            blk_delete(blk);
        }
    }
}
/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}
static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}

/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}
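
/*
 * Illustrative iteration pattern (assumed caller code, not from this file):
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ...
 *     }
 *
 * A caller that stops iterating before bdrv_next() has returned NULL must
 * call bdrv_next_cleanup() so the references held by the iterator are
 * dropped.
 */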
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->klass == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BlockDriverState *bs;
    BdrvChild *root;

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        bs = blk_bs(blk);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
    }

    blk_update_root_state(blk);

    /* bdrv_root_unref_child() will cause blk->root to become stale and may
     * switch to a completion coroutine later on. Let's drain all I/O here
     * to avoid that and a potential QEMU crash.
     */
    blk_drain(blk);
    root = blk->root;
    blk->root = NULL;
    bdrv_root_unref_child(root);
}

/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                       blk->perm, blk->shared_perm,
                                       blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}
/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}
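
/*
 * Usage sketch (illustrative, not part of the original file): a user that
 * needs to write and resize the image while tolerating any other users
 * could request:
 *
 *     uint64_t perm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
 *                     BLK_PERM_RESIZE;
 *     if (blk_set_perm(blk, perm, BLK_PERM_ALL, errp) < 0) {
 *         ... handle the permission conflict ...
 *     }
 */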
void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
DeviceState *blk_get_attached_dev(BlockBackend *blk)
{
    return blk->dev;
}

/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }

    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }

        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
{
    blk->allow_aio_context_change = allow;
}

void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
{
    blk->disable_request_queuing = disable;
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
{
    assert(blk->in_flight > 0);

    if (blk->quiesce_counter && !blk->disable_request_queuing) {
        blk_dec_in_flight(blk);
        qemu_co_queue_wait(&blk->queued_requests, NULL);
        blk_inc_in_flight(blk);
    }
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_do_preadv(blk, offset, bytes, qiov, flags);
    blk_dec_in_flight(blk);

    return ret;
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset,
                               flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
                                     unsigned int bytes,
                                     QEMUIOVector *qiov, size_t qiov_offset,
                                     BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
    blk_dec_in_flight(blk);

    return ret;
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
}

typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, qiov->size,
                              qiov, rwco->flags);
    aio_wait_kick();
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, qiov->size,
                                    qiov, 0, rwco->flags);
    aio_wait_kick();
}
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
    BlkRwCo rwco = {
        .blk    = blk,
        .offset = offset,
        .iobuf  = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    blk_inc_in_flight(blk);
    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }
    blk_dec_in_flight(blk);

    return rwco.ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int bytes, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

void blk_inc_in_flight(BlockBackend *blk)
{
    qatomic_inc(&blk->in_flight);
}

void blk_dec_in_flight(BlockBackend *blk)
{
    qatomic_dec(&blk->in_flight);
    aio_wait_kick();
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                     error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static AioContext *blk_aio_em_aiocb_get_aio_context(BlockAIOCB *acb_)
{
    BlkAioEmAIOCB *acb = container_of(acb_, BlkAioEmAIOCB, common);

    return blk_get_aio_context(acb->rwco.blk);
}

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size         = sizeof(BlkAioEmAIOCB),
    .get_aio_context    = blk_aio_em_aiocb_get_aio_context,
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        blk_dec_in_flight(acb->rwco.blk);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                         blk_aio_complete_bh, acb);
    }

    return &acb->common;
}
static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, acb->bytes,
                              qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
                                    qiov, 0, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);

    if (ret < 0) {
        return ret;
    }

    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);

    if (ret < 0) {
        return ret;
    }

    return count;
}
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
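
/*
 * Usage sketch (illustrative, with hypothetical callback names): the AIO
 * variants complete through a BlockCompletionFunc that runs in the
 * backend's AioContext:
 *
 *     static void read_done(void *opaque, int ret)
 *     {
 *         ... ret is 0 on success, -errno on failure ...
 *     }
 *
 *     blk_aio_preadv(blk, offset, &qiov, 0, read_done, opaque);
 */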
void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    blk_wait_while_drained(blk);

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base);
    aio_wait_kick();
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret;

    blk_wait_while_drained(blk);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk->root, offset, bytes);
}

static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_do_pdiscard(blk, offset, bytes);
    blk_dec_in_flight(blk);

    return ret;
}

static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, qiov->size);
    aio_wait_kick();
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn blk_do_flush(BlockBackend *blk)
{
    blk_wait_while_drained(blk);

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_do_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

int coroutine_fn blk_co_flush(BlockBackend *blk)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_do_flush(blk);
    blk_dec_in_flight(blk);

    return ret;
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_do_flush(rwco->blk);
    aio_wait_kick();
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}

void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(blk_get_aio_context(blk),
                   qatomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error));
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
/*
 * Returns true if the BlockBackend can support taking write permissions
 * (because its root node is not read-only).
 */
bool blk_supports_write_perm(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return !bdrv_is_read_only(bs);
    } else {
        return !blk->root_state.read_only;
    }
}

/*
 * Returns true if the BlockBackend can be written to in its current
 * configuration (i.e. if write permission has been requested)
 */
bool blk_is_writable(BlockBackend *blk)
{
    return blk->perm & BLK_PERM_WRITE;
}

bool blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_is_sg(bs);
}

bool blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag);
    g_free(id);
}
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
uint32_t blk_get_request_alignment(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
        assert(ctx == blk->ctx);
    }

    return blk->ctx;
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
                                  bool update_root_node, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    int ret;

    if (bs) {
        if (update_root_node) {
            ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
                                                 errp);
            if (ret < 0) {
                return ret;
            }
        }
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }
    }

    blk->ctx = new_context;
    return 0;
}

int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
                        Error **errp)
{
    return blk_do_set_aio_context(blk, new_context, true, errp);
}

static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                     GSList **ignore, Error **errp)
{
    BlockBackend *blk = child->opaque;

    if (blk->allow_aio_context_change) {
        return true;
    }

    /* Only manually created BlockBackends that are not attached to anything
     * can change their AioContext without updating their user. */
    if (!blk->name || blk->dev) {
        /* TODO Add BB name/QOM path */
        error_setg(errp, "Cannot change iothread of active block backend");
        return false;
    }

    return true;
}

static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                 GSList **ignore)
{
    BlockBackend *blk = child->opaque;
    blk_do_set_aio_context(blk, ctx, false, &error_abort);
}
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    notifier = g_new(BlockBackendAioNotifier, 1);
    notifier->attached_aio_context = attached_aio_context;
    notifier->detach_aio_context = detach_aio_context;
    notifier->opaque = opaque;
    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}
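
/*
 * Usage sketch (illustrative, with hypothetical callback names): users that
 * cache the AioContext register a notifier pair, and remove it with the
 * same triple before destroying the backend:
 *
 *     blk_add_aio_context_notifier(blk, attached_cb, detach_cb, s);
 *     ...
 *     blk_remove_aio_context_notifier(blk, attached_cb, detach_cb, s);
 */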
void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}
int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
                 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset, exact, prealloc, flags, errp);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);
        BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk));

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) {
            int ret;

            ret = bdrv_commit(unfiltered_bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    if (bs) {
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is a part of the same group as the one we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
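
/*
 * Usage sketch (illustrative, not part of the original file): enabling
 * throttling means joining a group first and applying the ThrottleConfig
 * afterwards; "group0" and cfg are hypothetical:
 *
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 *
 * Moving to another group later is a single blk_io_limits_update_group()
 * call, which is a no-op if @blk already belongs to that group.
 */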
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
        throttle_group_restart_tgm(tgm);
    }
}

static bool blk_root_drained_poll(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);
    return !!blk->in_flight;
}

static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
        while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
            /* Resume all queued requests */
        }
    }
}

void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
    bdrv_register_buf(blk_bs(blk), host, size);
}

void blk_unregister_buf(BlockBackend *blk, void *host)
{
    bdrv_unregister_buf(blk_bs(blk), host);
}

int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags)
{
    int r;

    r = blk_check_byte_request(blk_in, off_in, bytes);
    if (r) {
        return r;
    }
    r = blk_check_byte_request(blk_out, off_out, bytes);
    if (r) {
        return r;
    }

    return bdrv_co_copy_range(blk_in->root, off_in,
                              blk_out->root, off_out,
                              bytes, read_flags, write_flags);
}

const BdrvChild *blk_root(BlockBackend *blk)
{
    return blk->root;
}

int blk_make_empty(BlockBackend *blk, Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_make_empty(blk->root, errp);
}