/*
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */
13 #include "qemu/osdep.h"
14 #include "sysemu/block-backend.h"
15 #include "block/block_int.h"
16 #include "block/blockjob.h"
17 #include "block/coroutines.h"
18 #include "block/throttle-groups.h"
19 #include "hw/qdev-core.h"
20 #include "sysemu/blockdev.h"
21 #include "sysemu/runstate.h"
22 #include "sysemu/replay.h"
23 #include "qapi/error.h"
24 #include "qapi/qapi-events-block.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/option.h"
29 #include "migration/misc.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;

struct BlockBackend {
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    DeviceState *dev;           /* attached device model, if any */
    const BlockDevOps *dev_ops;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_aio_context_change;
    bool allow_write_beyond_eof;

    /* Protected by BQL */
    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter; /* atomic: written under BQL, read by other threads */
    QemuMutex queued_requests_lock; /* protects queued_requests */
    CoQueue queued_requests;
    bool disable_request_queuing; /* atomic */

    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests. BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends. Protected by BQL. */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/*
 * All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next(). Protected by BQL.
 */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
                                     int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}

static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);
static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
                                    GHashTable *visited, Transaction *tran,
                                    Error **errp);

static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    g_autofree char *dev_id = NULL;

    if (blk->name[0]) {
        return g_strdup_printf("block device '%s'", blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return g_strdup_printf("block device '%s'", dev_id);
    }
    /* TODO Callback into the BB owner for something more detailed */
    return g_strdup("an unnamed block device");
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static void blk_vm_state_changed(void *opaque, bool running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}

/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;
    uint64_t saved_shared_perm;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    /*
     * blk->shared_perm contains the permissions we want to share once
     * migration is really completely done. For now, we need to share
     * all; but we also need to retain blk->shared_perm, which is
     * overwritten by a successful blk_set_perm() call. Save it and
     * restore it below.
     */
    saved_shared_perm = blk->shared_perm;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
    blk->shared_perm = saved_shared_perm;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
    }
}

void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest. For block job BBs that satisfy this, we can just allow
     * it. This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}

static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static AioContext *blk_root_get_parent_aio_context(BdrvChild *c)
{
    BlockBackend *blk = c->opaque;

    return blk_get_aio_context(blk);
}

static const BdrvChildClass child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_poll       = blk_root_drained_poll,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,

    .attach             = blk_root_attach,
    .detach             = blk_root_detach,

    .change_aio_ctx     = blk_root_change_aio_ctx,

    .get_parent_aio_context = blk_root_get_parent_aio_context,
};

/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->ctx = ctx;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;

    block_acct_init(&blk->stats);

    qemu_mutex_init(&blk->queued_requests_lock);
    qemu_co_queue_init(&blk->queued_requests);
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend connected to an existing BlockDriverState.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the
 * permissions to request for @bs that is attached to this
 * BlockBackend. @shared_perm is a bitmask which describes which
 * permissions may be granted to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
                              uint64_t shared_perm, Error **errp)
{
    BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);

    if (blk_insert_bs(blk, bs, errp) < 0) {
        blk_unref(blk);
        return NULL;
    }
    return blk;
}

/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 * The new BlockBackend is in the main AioContext.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;
    uint64_t shared = BLK_PERM_ALL;

    /*
     * blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a major concern because the BDS stays private
     * and the file is generally not supposed to be used by a second process,
     * so we just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share.
     */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }
    if (flags & BDRV_O_NO_SHARE) {
        shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
    }

    blk = blk_new(qemu_get_aio_context(), perm, shared);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                       perm, shared, blk, errp);
    if (!blk->root) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    assert(qemu_co_queue_empty(&blk->queued_requests));
    qemu_mutex_destroy(&blk->queued_requests_lock);
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    assert(blk->refcnt > 0);
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (blk->refcnt > 1) {
            blk->refcnt--;
        } else {
            blk_drain(blk);
            /* blk_drain() cannot resurrect blk, nobody held a reference */
            assert(blk->refcnt == 1);
            blk->refcnt = 0;
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}

static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}

/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->klass == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BdrvChild *root;

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        BlockDriverState *bs = blk_bs(blk);

        /*
         * Take a ref in case blk_bs() changes across bdrv_drained_begin(), for
         * example, if a temporary filter node is removed by a blockjob.
         */
        bdrv_ref(bs);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
        bdrv_unref(bs);
    }

    blk_update_root_state(blk);

    /* bdrv_root_unref_child() will cause blk->root to become stale and may
     * switch to a completion coroutine later on. Let's drain all I/O here
     * to avoid that and a potential QEMU crash.
     */
    blk_drain(blk);
    root = blk->root;
    blk->root = NULL;

    bdrv_root_unref_child(root);
}

/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                       blk->perm, blk->shared_perm,
                                       blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}

/*
 * Change BlockDriverState associated with @blk.
 */
int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp)
{
    return bdrv_replace_child_bs(blk->root, new_bs, errp);
}

/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}
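
/*
 * Usage sketch (hypothetical caller): request write access while still
 * allowing other users consistent reads:
 *
 *     ret = blk_set_perm(blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                        BLK_PERM_CONSISTENT_READ, &err);
 *     if (ret < 0) {
 *         ... a conflicting user already holds the node ...
 *     }
 */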

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
DeviceState *blk_get_attached_dev(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->dev;
}

/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }

    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    GLOBAL_STATE_CODE();

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    GLOBAL_STATE_CODE();
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (qatomic_read(&blk->quiesce_counter) && ops && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
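
/*
 * Usage sketch (hypothetical device model): only the callbacks the device
 * cares about need to be filled in; everything else may stay NULL.
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .change_media_cb = my_change_media_cb,
 *         .resize_cb       = my_resize_cb,
 *     };
 *     blk_set_dev_ops(blk, &my_dev_ops, my_device_state);
 */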

/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    GLOBAL_STATE_CODE();
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            /* Errors here are only possible on load */
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    GLOBAL_STATE_CODE();
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
{
    blk->allow_aio_context_change = allow;
}

void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
{
    qatomic_set(&blk->disable_request_queuing, disable);
}

static int coroutine_fn GRAPH_RDLOCK
blk_check_byte_request(BlockBackend *blk, int64_t offset, int64_t bytes)
{
    int64_t len;

    if (!blk_co_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (!blk->allow_write_beyond_eof) {
        len = bdrv_co_getlength(blk_bs(blk));
        if (len < 0) {
            return len;
        }
        if (offset > len || len - offset < bytes) {
            return -EIO;
        }
    }

    return 0;
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
{
    assert(blk->in_flight > 0);

    if (qatomic_read(&blk->quiesce_counter) &&
        !qatomic_read(&blk->disable_request_queuing)) {
        /*
         * Take lock before decrementing in flight counter so main loop thread
         * waits for us to enqueue ourselves before it can leave the drained
         * section.
         */
        qemu_mutex_lock(&blk->queued_requests_lock);
        blk_dec_in_flight(blk);
        qemu_co_queue_wait(&blk->queued_requests, &blk->queued_requests_lock);
        blk_inc_in_flight(blk);
        qemu_mutex_unlock(&blk->queued_requests_lock);
    }
}
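
/*
 * The pairing required by the comment above looks like this in the callers
 * below (sketch):
 *
 *     blk_inc_in_flight(blk);
 *     ret = blk_co_do_preadv_part(blk, ...);  // calls blk_wait_while_drained()
 *     blk_dec_in_flight(blk);
 */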

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes,
                      QEMUIOVector *qiov, size_t qiov_offset,
                      BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                                              bytes, false);
    }

    ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset,
                              flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
                              void *buf, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    assert(bytes <= SIZE_MAX);

    return blk_co_preadv(blk, offset, bytes, &qiov, flags);
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags);
    blk_dec_in_flight(blk);

    return ret;
}

int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
                                    int64_t bytes, QEMUIOVector *qiov,
                                    size_t qiov_offset, BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags);
    blk_dec_in_flight(blk);

    return ret;
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, size_t qiov_offset,
                       BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                                              bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset,
                               flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
                                     int64_t bytes,
                                     QEMUIOVector *qiov, size_t qiov_offset,
                                     BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
    blk_dec_in_flight(blk);

    return ret;
}

int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
                               const void *buf, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    assert(bytes <= SIZE_MAX);

    return blk_co_pwritev(blk, offset, bytes, &qiov, flags);
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                int64_t bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
}
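
/*
 * Usage sketch (hypothetical coroutine_fn caller): a simple read-modify-write
 * with the byte-based helpers; the buffer size is illustrative.
 *
 *     uint8_t buf[4096];
 *     ret = blk_co_pread(blk, 0, sizeof(buf), buf, 0);
 *     if (ret == 0) {
 *         buf[0] ^= 0xff;
 *         ret = blk_co_pwrite(blk, 0, sizeof(buf), buf, 0);
 *     }
 */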

int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
                                           BlockDriverState *base,
                                           int64_t offset, int64_t bytes,
                                           int64_t *pnum, int64_t *map,
                                           BlockDriverState **file)
{
    GRAPH_RDLOCK_GUARD();
    return bdrv_co_block_status_above(blk_bs(blk), base, offset, bytes, pnum,
                                      map, file);
}

int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
                                           BlockDriverState *base,
                                           bool include_base, int64_t offset,
                                           int64_t bytes, int64_t *pnum)
{
    GRAPH_RDLOCK_GUARD();
    return bdrv_co_is_allocated_above(blk_bs(blk), base, include_base, offset,
                                      bytes, pnum);
}

typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    GLOBAL_STATE_CODE();
    return bdrv_make_zero(blk->root, flags);
}

void blk_inc_in_flight(BlockBackend *blk)
{
    qatomic_inc(&blk->in_flight);
}

void blk_dec_in_flight(BlockBackend *blk)
{
    qatomic_dec(&blk->in_flight);
    aio_wait_kick();
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                     error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int64_t bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static AioContext *blk_aio_em_aiocb_get_aio_context(BlockAIOCB *acb_)
{
    BlkAioEmAIOCB *acb = container_of(acb_, BlkAioEmAIOCB, common);

    return blk_get_aio_context(acb->rwco.blk);
}

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size         = sizeof(BlkAioEmAIOCB),
    .get_aio_context    = blk_aio_em_aiocb_get_aio_context,
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        blk_dec_in_flight(acb->rwco.blk);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
                                int64_t bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    aio_co_enter(blk_get_aio_context(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                         blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

static void coroutine_fn blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov,
                                      0, rwco->flags);
    blk_aio_complete(acb);
}

static void coroutine_fn blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
                                       qiov, 0, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int64_t bytes, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int64_t coroutine_fn blk_co_getlength(BlockBackend *blk)
{
    GRAPH_RDLOCK_GUARD();

    if (!blk_co_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_getlength(blk_bs(blk));
}

int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    GRAPH_RDLOCK_GUARD();

    if (!bs) {
        return -ENOMEDIUM;
    }
    return bdrv_co_nb_sectors(bs);
}

/*
 * This wrapper is written by hand because this function is in the hot I/O path,
 * via blk_get_geometry.
 */
int64_t coroutine_mixed_fn blk_nb_sectors(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return -ENOMEDIUM;
    }
    return bdrv_nb_sectors(bs);
}

/* return 0 as number of sectors if no device present or error */
void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
                                      uint64_t *nb_sectors_ptr)
{
    int64_t ret = blk_co_nb_sectors(blk);
    *nb_sectors_ptr = ret < 0 ? 0 : ret;
}

/*
 * This wrapper is written by hand because this function is in the hot I/O path.
 */
void coroutine_mixed_fn blk_get_geometry(BlockBackend *blk,
                                         uint64_t *nb_sectors_ptr)
{
    int64_t ret = blk_nb_sectors(blk);
    *nb_sectors_ptr = ret < 0 ? 0 : ret;
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    assert((uint64_t)qiov->size <= INT64_MAX);
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    assert((uint64_t)qiov->size <= INT64_MAX);
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
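
/*
 * Usage sketch (hypothetical caller): the AIO variants return immediately
 * and invoke @cb from the BlockBackend's AioContext once the request
 * completes.
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         // ret is 0 on success, a negative errno on failure
 *     }
 *
 *     blk_aio_preadv(blk, 0, &qiov, 0, my_read_cb, my_opaque);
 */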

void blk_aio_cancel(BlockAIOCB *acb)
{
    GLOBAL_STATE_CODE();
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();

    if (!blk_co_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
                              void *buf)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_ioctl(blk, req, buf);
    blk_dec_in_flight(blk);

    return ret;
}

static void coroutine_fn blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
{
    int ret;

    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk->root, offset, bytes);
}

static void coroutine_fn blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int64_t bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
                                 int64_t bytes)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_pdiscard(blk, offset, bytes);
    blk_dec_in_flight(blk);

    return ret;
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
{
    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();

    if (!blk_co_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void coroutine_fn blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_do_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

int coroutine_fn blk_co_flush(BlockBackend *blk)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_flush(blk);
    blk_dec_in_flight(blk);

    return ret;
}

static void coroutine_fn blk_aio_zone_report_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_zone_report(rwco->blk, rwco->offset,
                                   (unsigned int*)(uintptr_t)acb->bytes,
                                   rwco->iobuf);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
                                unsigned int *nr_zones,
                                BlockZoneDescriptor *zones,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = zones,
        .ret    = NOT_DONE,
    };
    acb->bytes = (int64_t)(uintptr_t)nr_zones;
    acb->has_returned = false;

    co = qemu_coroutine_create(blk_aio_zone_report_entry, acb);
    aio_co_enter(blk_get_aio_context(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                         blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

static void coroutine_fn blk_aio_zone_mgmt_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_zone_mgmt(rwco->blk,
                                 (BlockZoneOp)(uintptr_t)rwco->iobuf,
                                 rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
                              int64_t offset, int64_t len,
                              BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = (void *)(uintptr_t)op,
        .ret    = NOT_DONE,
    };
    acb->bytes = len;
    acb->has_returned = false;

    co = qemu_coroutine_create(blk_aio_zone_mgmt_entry, acb);
    aio_co_enter(blk_get_aio_context(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                         blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

static void coroutine_fn blk_aio_zone_append_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_zone_append(rwco->blk, (int64_t *)(uintptr_t)acb->bytes,
                                   rwco->iobuf, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
                                QEMUIOVector *qiov, BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .iobuf  = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = (int64_t)(uintptr_t)offset;
    acb->has_returned = false;

    co = qemu_coroutine_create(blk_aio_zone_append_entry, acb);
    aio_co_enter(blk_get_aio_context(blk), co);
    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                         blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

/*
 * Send a zone_report command.
 * offset is a byte offset from the start of the device. No alignment
 * required for offset.
 * nr_zones represents IN maximum and OUT actual.
 */
int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
                                    unsigned int *nr_zones,
                                    BlockZoneDescriptor *zones)
{
    int ret;

    blk_inc_in_flight(blk); /* increase before waiting */
    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();
    if (!blk_is_available(blk)) {
        blk_dec_in_flight(blk);
        return -ENOMEDIUM;
    }
    ret = bdrv_co_zone_report(blk_bs(blk), offset, nr_zones, zones);
    blk_dec_in_flight(blk);
    return ret;
}
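
/*
 * Usage sketch (hypothetical coroutine_fn caller): report up to 16 zones
 * starting at byte offset 0; on return nr_zones holds the count actually
 * filled in.
 *
 *     BlockZoneDescriptor zones[16];
 *     unsigned int nr_zones = 16;
 *     ret = blk_co_zone_report(blk, 0, &nr_zones, zones);
 */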

/*
 * Send a zone_management command.
 * op is the zone operation;
 * offset is the byte offset from the start of the zoned device;
 * len is the maximum number of bytes the command should operate on. It
 * should be aligned with the device zone size.
 */
int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
                                  int64_t offset, int64_t len)
{
    int ret;

    blk_inc_in_flight(blk);
    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();

    ret = blk_check_byte_request(blk, offset, len);
    if (ret < 0) {
        blk_dec_in_flight(blk);
        return ret;
    }

    ret = bdrv_co_zone_mgmt(blk_bs(blk), op, offset, len);
    blk_dec_in_flight(blk);
    return ret;
}

/*
 * Send a zone_append command.
 */
int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
                                    QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    blk_wait_while_drained(blk);
    GRAPH_RDLOCK_GUARD();
    if (!blk_is_available(blk)) {
        blk_dec_in_flight(blk);
        return -ENOMEDIUM;
    }

    ret = bdrv_co_zone_append(blk_bs(blk), offset, qiov, flags);
    blk_dec_in_flight(blk);
    return ret;
}

void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_ref(bs);
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(blk_get_aio_context(blk),
                   qatomic_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
        bdrv_unref(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    GLOBAL_STATE_CODE();

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE_UNLOCKED(NULL, qatomic_read(&blk->in_flight) > 0);
    }

    bdrv_drain_all_end();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    GLOBAL_STATE_CODE();
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
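
/*
 * For instance, with werror=enospc (BLOCKDEV_ON_ERROR_ENOSPC), a failed write
 * with error == ENOSPC maps to BLOCK_ERROR_ACTION_STOP, while any other errno
 * is merely reported to the guest.
 */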

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk),
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error));
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

/*
 * Returns true if the BlockBackend can support taking write permissions
 * (because its root node is not read-only).
 */
bool blk_supports_write_perm(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        return !bdrv_is_read_only(bs);
    } else {
        return blk->root_state.open_flags & BDRV_O_RDWR;
    }
}

/*
 * Returns true if the BlockBackend can be written to in its current
 * configuration (i.e. if write permission has been requested)
 */
bool blk_is_writable(BlockBackend *blk)
{
    return blk->perm & BLK_PERM_WRITE;
}

bool blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (!bs) {
        return false;
    }

    return bdrv_is_sg(bs);
}

bool blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_activate(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    /*
     * Migration code can call this function in coroutine context, so leave
     * coroutine context if necessary.
     */
    if (qemu_in_coroutine()) {
        bdrv_co_activate(bs, errp);
    } else {
        bdrv_activate(bs, errp);
    }
}

bool coroutine_fn blk_co_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    assert_bdrv_graph_readable();
    return bs && bdrv_co_is_inserted(bs);
}

bool coroutine_fn blk_co_is_available(BlockBackend *blk)
{
    return blk_co_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    GRAPH_RDLOCK_GUARD();

    if (bs) {
        bdrv_co_lock_medium(bs, locked);
    }
}

void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    GRAPH_RDLOCK_GUARD();

    if (bs) {
        bdrv_co_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag);
    g_free(id);
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
uint32_t blk_get_request_alignment(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}

/* Returns the maximum hardware transfer length, in bytes; guaranteed nonzero */
uint64_t blk_get_max_hw_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint64_t max = INT_MAX;

    if (bs) {
        max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer);
        max = MIN_NON_ZERO(max, bs->bl.max_transfer);
    }
    return ROUND_DOWN(max, blk_get_request_alignment(blk));
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = INT_MAX;

    if (bs) {
        max = MIN_NON_ZERO(max, bs->bl.max_transfer);
    }
    return ROUND_DOWN(max, blk_get_request_alignment(blk));
}

int blk_get_max_hw_iov(BlockBackend *blk)
{
    return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov,
                        blk->root->bs->bl.max_iov);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
        assert(ctx == blk->ctx);
    }

    return blk->ctx;
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
                                  bool update_root_node, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    int ret;

    if (bs) {
        bdrv_ref(bs);

        if (update_root_node) {
            /*
             * update_root_node MUST be false for blk_root_set_aio_ctx_commit(),
             * as we are already in the commit function of a transaction.
             */
            ret = bdrv_try_change_aio_context(bs, new_context, blk->root, errp);
            if (ret < 0) {
                bdrv_unref(bs);
                return ret;
            }
        }
        /*
         * Make blk->ctx consistent with the root node before we invoke any
         * other operations like drain that might inquire blk->ctx
         */
        blk->ctx = new_context;
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }

        bdrv_unref(bs);
    } else {
        blk->ctx = new_context;
    }

    return 0;
}

int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
                        Error **errp)
{
    GLOBAL_STATE_CODE();
    return blk_do_set_aio_context(blk, new_context, true, errp);
}
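
/*
 * Usage sketch (hypothetical caller, BQL held): move a backend into an
 * IOThread's AioContext; this fails if a user of the backend does not allow
 * the change.
 *
 *     AioContext *ctx = iothread_get_aio_context(iothread);
 *     if (blk_set_aio_context(blk, ctx, &err) < 0) {
 *         ... some parent refused the AioContext change ...
 *     }
 */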

typedef struct BdrvStateBlkRootContext {
    AioContext *new_ctx;
    BlockBackend *blk;
} BdrvStateBlkRootContext;

static void blk_root_set_aio_ctx_commit(void *opaque)
{
    BdrvStateBlkRootContext *s = opaque;
    BlockBackend *blk = s->blk;

    blk_do_set_aio_context(blk, s->new_ctx, false, &error_abort);
}

static TransactionActionDrv set_blk_root_context = {
    .commit = blk_root_set_aio_ctx_commit,
    .clean = g_free,
};

static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
                                    GHashTable *visited, Transaction *tran,
                                    Error **errp)
{
    BlockBackend *blk = child->opaque;
    BdrvStateBlkRootContext *s;

    if (!blk->allow_aio_context_change) {
        /*
         * Manually created BlockBackends (those with a name) that are not
         * attached to anything can change their AioContext without updating
         * their user; return an error for others.
         */
        if (!blk->name || blk->dev) {
            /* TODO Add BB name/QOM path */
            error_setg(errp, "Cannot change iothread of active block backend");
            return false;
        }
    }

    s = g_new(BdrvStateBlkRootContext, 1);
    *s = (BdrvStateBlkRootContext) {
        .new_ctx = ctx,
        .blk = blk,
    };

    tran_add(tran, &set_blk_root_context, s);
    return true;
}
*blk
,
2511 void (*attached_aio_context
)(AioContext
*new_context
, void *opaque
),
2512 void (*detach_aio_context
)(void *opaque
), void *opaque
)
2514 BlockBackendAioNotifier
*notifier
;
2515 BlockDriverState
*bs
= blk_bs(blk
);
2516 GLOBAL_STATE_CODE();
2518 notifier
= g_new(BlockBackendAioNotifier
, 1);
2519 notifier
->attached_aio_context
= attached_aio_context
;
2520 notifier
->detach_aio_context
= detach_aio_context
;
2521 notifier
->opaque
= opaque
;
2522 QLIST_INSERT_HEAD(&blk
->aio_notifiers
, notifier
, list
);
2525 bdrv_add_aio_context_notifier(bs
, attached_aio_context
,
2526 detach_aio_context
, opaque
);
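/*
 * Removal must use the exact same (attached, detach, opaque) triple that
 * was registered, e.g. (sketch with hypothetical callbacks):
 *
 *     blk_add_aio_context_notifier(blk, my_attached_cb, my_detach_cb, s);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attached_cb, my_detach_cb, s);
 */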
void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    GLOBAL_STATE_CODE();
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    GLOBAL_STATE_CODE();
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}
void coroutine_fn blk_co_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    IO_CODE();
    GRAPH_RDLOCK_GUARD();

    if (bs) {
        bdrv_co_io_plug(bs);
    }
}
void coroutine_fn blk_co_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    IO_CODE();
    GRAPH_RDLOCK_GUARD();

    if (bs) {
        bdrv_co_io_unplug(bs);
    }
}
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    IO_CODE();
    return &blk->stats;
}
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    IO_CODE();
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int64_t bytes, BdrvRequestFlags flags)
{
    IO_OR_GS_CODE();
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}
int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
                                          int64_t bytes, const void *buf)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
    IO_OR_GS_CODE();
    return blk_co_pwritev_part(blk, offset, bytes, &qiov, 0,
                               BDRV_REQ_WRITE_COMPRESSED);
}
int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
                                 PreallocMode prealloc, BdrvRequestFlags flags,
                                 Error **errp)
{
    IO_OR_GS_CODE();
    GRAPH_RDLOCK_GUARD();
    if (!blk_co_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_co_truncate(blk->root, offset, exact, prealloc, flags, errp);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}
int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}
/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->root_state.detect_zeroes;
}
/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->root_state.open_flags;
}
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return &blk->root_state;
}
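/*
 * Sketch of the intended medium-change flow around the root state (the
 * change-medium code in blockdev.c is believed to be the main consumer
 * of these getters):
 *
 *     blk_update_root_state(blk);     before dropping the old BDS
 *     ...
 *     flags = blk_get_open_flags_from_root_state(blk);
 *     then pass "flags" to the bdrv_open() of the replacement node
 */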
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;
    GLOBAL_STATE_CODE();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);
        BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk));

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) {
            int ret;

            ret = bdrv_commit(unfiltered_bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
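/*
 * blk_commit_all() appears to back HMP's "commit all" (judging from its
 * callers): every backend with an inserted medium and a COW child gets
 * its active layer folded into the backing file via bdrv_commit().
 */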
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    GLOBAL_STATE_CODE();
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}
void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    GLOBAL_STATE_CODE();
    if (bs) {
        bdrv_ref(bs);
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
        bdrv_unref(bs);
    }
}
/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    GLOBAL_STATE_CODE();
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}
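/*
 * Intended ordering, per the comment above (sketch; the group name and
 * the IOPS value are placeholder example values):
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */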
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    GLOBAL_STATE_CODE();
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this bs belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    if (qatomic_fetch_inc(&blk->quiesce_counter) == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
        throttle_group_restart_tgm(tgm);
    }
}
static bool blk_root_drained_poll(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    bool busy = false;
    assert(qatomic_read(&blk->quiesce_counter));

    if (blk->dev_ops && blk->dev_ops->drained_poll) {
        busy = blk->dev_ops->drained_poll(blk->dev_opaque);
    }
    return busy || !!blk->in_flight;
}
static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(qatomic_read(&blk->quiesce_counter));

    assert(blk->public.throttle_group_member.io_limits_disabled);
    qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (qatomic_fetch_dec(&blk->quiesce_counter) == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
        qemu_mutex_lock(&blk->queued_requests_lock);
        while (qemu_co_enter_next(&blk->queued_requests,
                                  &blk->queued_requests_lock)) {
            /* Resume all queued requests */
        }
        qemu_mutex_unlock(&blk->queued_requests_lock);
    }
}
bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    GLOBAL_STATE_CODE();

    if (bs) {
        return bdrv_register_buf(bs, host, size, errp);
    }
    return true;
}
void blk_unregister_buf(BlockBackend *blk, void *host, size_t size)
{
    BlockDriverState *bs = blk_bs(blk);

    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_unregister_buf(bs, host, size);
    }
}
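/*
 * Example pairing (sketch; host_ptr and region_size stand in for the
 * caller's buffer). Registration lets drivers that support it pre-map
 * I/O memory instead of mapping it per request:
 *
 *     if (!blk_register_buf(blk, host_ptr, region_size, errp)) {
 *         return;
 *     }
 *     ...
 *     blk_unregister_buf(blk, host_ptr, region_size);
 */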
int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int64_t bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags)
{
    int r;
    IO_CODE();
    GRAPH_RDLOCK_GUARD();

    r = blk_check_byte_request(blk_in, off_in, bytes);
    if (r) {
        return r;
    }
    r = blk_check_byte_request(blk_out, off_out, bytes);
    if (r) {
        return r;
    }

    return bdrv_co_copy_range(blk_in->root, off_in,
                              blk_out->root, off_out,
                              bytes, read_flags, write_flags);
}
const BdrvChild *blk_root(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->root;
}
int blk_make_empty(BlockBackend *blk, Error **errp)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_make_empty(blk->root, errp);
}