/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/id.h"
#include "qemu/option.h"
#include "trace.h"
#include "migration/misc.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;
struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    DeviceState *dev;           /* attached device model, if any */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter;
    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests.  BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return dev_id;
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}
static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static void blk_vm_state_changed(void *opaque, int running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}
void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest.  For block job BBs that satisfy this, we can just allow
     * it.  This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}
static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_poll       = blk_root_drained_poll,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,

    .attach             = blk_root_attach,
    .detach             = blk_root_detach,
};
/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;

    block_acct_init(&blk->stats);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
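
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * creates a BlockBackend and then attaches an existing node with
 * blk_insert_bs() (defined below); "bs" and "errp" stand for whatever the
 * caller has at hand:
 *
 *     BlockBackend *blk = blk_new(BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                                 BLK_PERM_ALL);
 *     if (blk_insert_bs(blk, bs, errp) < 0) {
 *         blk_unref(blk);
 *         return -EPERM;
 *     }
 */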
/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;

    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }

    blk = blk_new(perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (!blk->root) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
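
/*
 * Illustrative sketch (assumption, not from this file): qemu-img style tools
 * commonly open an image read-write in a single call and release it with
 * blk_unref() when done:
 *
 *     BlockBackend *blk = blk_new_open("disk.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return 1;
 *     }
 *     ...
 *     blk_unref(blk);
 */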
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}
/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    assert(blk->refcnt > 0);
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (blk->refcnt > 1) {
            blk->refcnt--;
        } else {
            blk_drain(blk);
            /* blk_drain() cannot resurrect blk, nobody held a reference */
            assert(blk->refcnt == 1);
            blk->refcnt = 0;
            blk_delete(blk);
        }
    }
}
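
/*
 * Illustrative note (assumption, not from this file): code that stashes a
 * BlockBackend pointer across an asynchronous callback must pair
 * blk_ref()/blk_unref() around the window so the backend cannot be deleted
 * under it; "my_bh" is a hypothetical bottom half:
 *
 *     blk_ref(blk);                              // keep blk alive
 *     aio_bh_schedule_oneshot(ctx, my_bh, blk);
 *     ...
 *     // in my_bh(): use blk, then blk_unref(blk);
 */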
/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}
static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}

/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}
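
/*
 * Illustrative sketch (assumption, not from this file): a full iteration
 * over all top-level nodes; bdrv_next_cleanup() is only needed when bailing
 * out before bdrv_next() has returned NULL, since the iterator holds
 * references while it is live.  "some_condition" is a hypothetical predicate:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (some_condition(bs)) {
 *             bdrv_next_cleanup(&it);    // drop the iterator's references
 *             break;
 *         }
 *     }
 */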
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}
/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;
    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}
/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BlockDriverState *bs;

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        bs = blk_bs(blk);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
    }

    blk_update_root_state(blk);

    /* bdrv_root_unref_child() will cause blk->root to become stale and may
     * switch to a completion coroutine later on. Let's drain all I/O here
     * to avoid that and a potential QEMU crash.
     */
    blk_drain(blk);
    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       blk->perm, blk->shared_perm, blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }
    bdrv_ref(bs);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}
/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}
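
/*
 * Illustrative sketch (assumption, not from this file): a device model that
 * must stop sharing writes, e.g. after storage migration completes, could
 * tighten its masks like this:
 *
 *     uint64_t perm, shared;
 *     Error *local_err = NULL;
 *
 *     blk_get_perm(blk, &perm, &shared);
 *     if (blk_set_perm(blk, perm, shared & ~BLK_PERM_WRITE, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */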
/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
DeviceState *blk_get_attached_dev(BlockBackend *blk)
{
    return blk->dev;
}
/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }

    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
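
/*
 * Illustrative sketch (assumption, not from this file): a device model
 * typically defines a static BlockDevOps table and registers it at realize
 * time; the callback names below are hypothetical:
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb = my_dev_change_media_cb,
 *         .is_tray_open    = my_dev_is_tray_open,
 *         .resize_cb       = my_dev_resize_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_block_ops, my_dev_state);
 */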
/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
            g_free(id);
        }
    }
}
static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
                              qiov, rwco->flags);
    aio_wait_kick();
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
    aio_wait_kick();
}
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
    BlkRwCo rwco = {
        .blk    = blk,
        .offset = offset,
        .iobuf  = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}
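
/*
 * Added note (not in the original file): blk_prw() is the synchronous
 * wrapper around the coroutine-based I/O paths above.  rwco.ret starts out
 * as NOT_DONE; the coroutine entry point (blk_read_entry()/blk_write_entry())
 * overwrites it with the real return value and calls aio_wait_kick(), so
 * BDRV_POLL_WHILE() keeps running the event loop exactly until the request
 * has completed.
 */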
int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int bytes, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

void blk_inc_in_flight(BlockBackend *blk)
{
    atomic_inc(&blk->in_flight);
}

void blk_dec_in_flight(BlockBackend *blk)
{
    atomic_dec(&blk->in_flight);
    aio_wait_kick();
}
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        blk_dec_in_flight(acb->rwco.blk);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }

    return &acb->common;
}
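
/*
 * Added note (not in the original file): the has_returned flag keeps the
 * completion callback from running before blk_aio_prwv() has handed the
 * BlockAIOCB back to its caller.  If the coroutine finishes synchronously
 * (rwco.ret != NOT_DONE when bdrv_coroutine_enter() returns), completion is
 * deferred to a bottom half; otherwise the still-running coroutine calls
 * blk_aio_complete() itself once it finishes, and by then has_returned is
 * already true.
 */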
static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}

static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}
int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             qiov->iov[0].iov_base);
    aio_wait_kick();
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk->root, offset, bytes);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
    aio_wait_kick();
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}
void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(blk_get_aio_context(blk),
                   atomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error));
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
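
/*
 * Illustrative sketch (assumption, not from this file): a device model's
 * error path usually pairs the two helpers above; "req" and the retry queue
 * are hypothetical:
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *
 *     blk_error_action(blk, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         queue_request_for_retry(req);   // re-run after the VM continues
 *     }
 */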
bool blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

bool blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_is_sg(bs);
}

bool blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}
void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag);
    g_free(id);
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}
int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    return bdrv_get_aio_context(blk_bs(blk));
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    if (bs) {
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }
        bdrv_set_aio_context(bs, new_context);
    }
}
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    notifier = g_new(BlockBackendAioNotifier, 1);
    notifier->attached_aio_context = attached_aio_context;
    notifier->detach_aio_context = detach_aio_context;
    notifier->opaque = opaque;
    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}
void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}
int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
                 Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset, prealloc, errp);
}

static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
    aio_wait_kick();
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
BlockdevDetectZeroesOptions blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    if (bs) {
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is already a part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
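
/*
 * Illustrative sketch (assumption, not from this file): enabling throttling
 * on a backend means joining a group first and configuring limits second;
 * the group name and limit values are made up for the example:
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 1000;   // cap at 1000 IOPS
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */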
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
        throttle_group_restart_tgm(&blk->public.throttle_group_member);
    }
}

static bool blk_root_drained_poll(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);
    return !!blk->in_flight;
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
    }
}
void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
    bdrv_register_buf(blk_bs(blk), host, size);
}

void blk_unregister_buf(BlockBackend *blk, void *host)
{
    bdrv_unregister_buf(blk_bs(blk), host);
}

int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags)
{
    int r;
    r = blk_check_byte_request(blk_in, off_in, bytes);
    if (r) {
        return r;
    }
    r = blk_check_byte_request(blk_out, off_out, bytes);
    if (r) {
        return r;
    }
    return bdrv_co_copy_range(blk_in->root, off_in,
                              blk_out->root, off_out,
                              bytes, read_flags, write_flags);
}

const BdrvChild *blk_root(BlockBackend *blk)
{
    return blk->root;
}