block/block-backend.c
/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "hw/qdev-core.h"
#include "sysemu/blockdev.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/replay.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/id.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "trace.h"
#include "migration/misc.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    AioContext *ctx;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    DeviceState *dev;           /* attached device model, if any */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_aio_context_change;
    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter;
    CoQueue queued_requests;
    bool disable_request_queuing;

    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests. BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}

static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                     GSList **ignore, Error **errp);
static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                 GSList **ignore);

static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return dev_id;
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static void blk_vm_state_changed(void *opaque, int running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}

/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}

void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest. For block job BBs that satisfy this, we can just allow
     * it. This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}

static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_poll       = blk_root_drained_poll,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,

    .attach             = blk_root_attach,
    .detach             = blk_root_detach,

    .can_set_aio_ctx    = blk_root_can_set_aio_ctx,
    .set_aio_ctx        = blk_root_set_aio_ctx,
};

/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->ctx = ctx;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;

    block_acct_init(&blk->stats);

    qemu_co_queue_init(&blk->queued_requests);
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
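
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that wants consistent reads and writes while sharing everything else with
 * other users of the node could do:
 *
 *     BlockBackend *blk = blk_new(qemu_get_aio_context(),
 *                                 BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                                 BLK_PERM_ALL);
 *     ...
 *     blk_unref(blk);
 *
 * The permissions only take effect against a node once one is attached,
 * e.g. with blk_insert_bs().
 */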

/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 * The new BlockBackend is in the main AioContext.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;

    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }

    blk = blk_new(qemu_get_aio_context(), perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (!blk->root) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
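
/*
 * Usage sketch (illustrative; the file name is hypothetical): open an image
 * read-write and drop the returned reference with blk_unref() when done:
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("/tmp/test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */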

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    assert(blk->refcnt > 0);
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (blk->refcnt > 1) {
            blk->refcnt--;
        } else {
            blk_drain(blk);
            /* blk_drain() cannot resurrect blk, nobody held a reference */
            assert(blk->refcnt == 1);
            blk->refcnt = 0;
            blk_delete(blk);
        }
    }
}
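
/*
 * Reference counting sketch (illustrative): a caller that hands @blk to
 * another owner takes a reference first, and each owner drops its own:
 *
 *     blk_ref(blk);                  <- now two references
 *     give_to_other_owner(blk);      <- give_to_other_owner() is hypothetical
 *     blk_unref(blk);                <- the other owner still holds one
 *
 * The blk_unref() that drops the count to zero drains @blk and calls
 * blk_delete().
 */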

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}

static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}

/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}
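
/*
 * Iteration sketch (illustrative): walking every top-level BDS with the
 * iterator above; an early exit must go through bdrv_next_cleanup() so the
 * references taken by bdrv_next() are dropped:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (done_early(bs)) {          <- done_early() is hypothetical
 *             bdrv_next_cleanup(&it);
 *             break;
 *         }
 *     }
 */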

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;
    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}
782 void blk_remove_bs(BlockBackend *blk)
784 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
785 BlockDriverState *bs;
787 notifier_list_notify(&blk->remove_bs_notifiers, blk);
788 if (tgm->throttle_state) {
789 bs = blk_bs(blk);
790 bdrv_drained_begin(bs);
791 throttle_group_detach_aio_context(tgm);
792 throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
793 bdrv_drained_end(bs);
796 blk_update_root_state(blk);
798 /* bdrv_root_unref_child() will cause blk->root to become stale and may
799 * switch to a completion coroutine later on. Let's drain all I/O here
800 * to avoid that and a potential QEMU crash.
802 blk_drain(blk);
803 bdrv_root_unref_child(blk->root);
804 blk->root = NULL;
808 * Associates a new BlockDriverState with @blk.
810 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
812 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
813 bdrv_ref(bs);
814 blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
815 blk->perm, blk->shared_perm, blk, errp);
816 if (blk->root == NULL) {
817 return -EPERM;
820 notifier_list_notify(&blk->insert_bs_notifiers, blk);
821 if (tgm->throttle_state) {
822 throttle_group_detach_aio_context(tgm);
823 throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
826 return 0;
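
/*
 * Medium change sketch (illustrative): swapping the node under a backend is
 * a blk_remove_bs()/blk_insert_bs() pair; the attach can fail if the stored
 * permissions cannot be obtained on the new node:
 *
 *     blk_remove_bs(blk);
 *     if (blk_insert_bs(blk, new_bs, &err) < 0) {    <- new_bs is hypothetical
 *         error_report_err(err);
 *     }
 */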

/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
DeviceState *blk_get_attached_dev(BlockBackend *blk)
{
    return blk->dev;
}

/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }

    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
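
/*
 * Device model sketch (illustrative): a frontend typically provides a static
 * BlockDevOps and registers it once at realize time. The member names below
 * are real BlockDevOps callbacks; the my_device_* handlers are hypothetical:
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb = my_device_change_media_cb,
 *         .drained_begin   = my_device_drained_begin,
 *         .drained_end     = my_device_drained_end,
 *     };
 *     ...
 *     blk_set_dev_ops(blk, &my_block_ops, dev);
 */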

/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
{
    blk->allow_aio_context_change = allow;
}

void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
{
    blk->disable_request_queuing = disable;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
{
    if (blk->quiesce_counter && !blk->disable_request_queuing) {
        qemu_co_queue_wait(&blk->queued_requests, NULL);
    }
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
                              qiov, rwco->flags);
    aio_wait_kick();
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
    aio_wait_kick();
}

static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
    BlkRwCo rwco = {
        .blk    = blk,
        .offset = offset,
        .iobuf  = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int bytes, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

void blk_inc_in_flight(BlockBackend *blk)
{
    atomic_inc(&blk->in_flight);
}

void blk_dec_in_flight(BlockBackend *blk)
{
    atomic_dec(&blk->in_flight);
    aio_wait_kick();
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                     error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        blk_dec_in_flight(acb->rwco.blk);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                         blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    if (rwco->blk->quiesce_counter) {
        blk_dec_in_flight(rwco->blk);
        blk_wait_while_drained(rwco->blk);
        blk_inc_in_flight(rwco->blk);
    }

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    if (rwco->blk->quiesce_counter) {
        blk_dec_in_flight(rwco->blk);
        blk_wait_while_drained(rwco->blk);
        blk_inc_in_flight(rwco->blk);
    }

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
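
/*
 * Synchronous I/O sketch (illustrative): blk_pread()/blk_pwrite() return the
 * byte count on success and a negative errno on failure, so callers check
 * for < 0 rather than for a short count:
 *
 *     uint8_t buf[512];
 *
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         return -EIO;
 *     }
 *     if (blk_pwrite(blk, 0, buf, sizeof(buf), 0) < 0) {
 *         return -EIO;
 *     }
 */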

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
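
/*
 * Asynchronous I/O sketch (illustrative): the completion callback runs in
 * the backend's AioContext once the request finishes; my_read_cb is a
 * hypothetical completion function:
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         ... handle ret < 0 as -errno ...
 *     }
 *
 *     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);
 *     blk_aio_preadv(blk, offset, &qiov, 0, my_read_cb, opaque);
 */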

static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    blk_wait_while_drained(blk);

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             qiov->iov[0].iov_base);
    aio_wait_kick();
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}

int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret;

    blk_wait_while_drained(blk);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk->root, offset, bytes);
}

int blk_co_flush(BlockBackend *blk)
{
    blk_wait_while_drained(blk);

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
    aio_wait_kick();
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}

void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(blk_get_aio_context(blk),
                   atomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error));
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

bool blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

bool blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_is_sg(bs);
}

bool blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag);
    g_free(id);
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
uint32_t blk_get_request_alignment(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
        assert(ctx == blk->ctx);
    }

    return blk->ctx;
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
                                  bool update_root_node, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    int ret;

    if (bs) {
        if (update_root_node) {
            ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
                                                 errp);
            if (ret < 0) {
                return ret;
            }
        }
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }
    }

    blk->ctx = new_context;
    return 0;
}

int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
                        Error **errp)
{
    return blk_do_set_aio_context(blk, new_context, true, errp);
}

static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                     GSList **ignore, Error **errp)
{
    BlockBackend *blk = child->opaque;

    if (blk->allow_aio_context_change) {
        return true;
    }

    /* Only manually created BlockBackends that are not attached to anything
     * can change their AioContext without updating their user. */
    if (!blk->name || blk->dev) {
        /* TODO Add BB name/QOM path */
        error_setg(errp, "Cannot change iothread of active block backend");
        return false;
    }

    return true;
}

static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                 GSList **ignore)
{
    BlockBackend *blk = child->opaque;
    blk_do_set_aio_context(blk, ctx, false, &error_abort);
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    notifier = g_new(BlockBackendAioNotifier, 1);
    notifier->attached_aio_context = attached_aio_context;
    notifier->detach_aio_context = detach_aio_context;
    notifier->opaque = opaque;
    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}
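
/*
 * Notifier sketch (illustrative): removal looks the (callbacks, opaque)
 * tuple up again, so the add/remove arguments must match exactly;
 * my_attach/my_detach are hypothetical callbacks:
 *
 *     blk_add_aio_context_notifier(blk, my_attach, my_detach, opaque);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attach, my_detach, opaque);
 */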

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
                 Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset, prealloc, errp);
}

static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
    aio_wait_kick();
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}

/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    if (bs) {
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
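
/*
 * Throttling sketch (illustrative): a backend joins a named group before
 * limits are configured; "group0" and the configured value are arbitrary:
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */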

static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
        throttle_group_restart_tgm(&blk->public.throttle_group_member);
    }
}

static bool blk_root_drained_poll(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);
    return !!blk->in_flight;
}

static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
        while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
            /* Resume all queued requests */
        }
    }
}

void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
    bdrv_register_buf(blk_bs(blk), host, size);
}

void blk_unregister_buf(BlockBackend *blk, void *host)
{
    bdrv_unregister_buf(blk_bs(blk), host);
}

int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags)
{
    int r;
    r = blk_check_byte_request(blk_in, off_in, bytes);
    if (r) {
        return r;
    }
    r = blk_check_byte_request(blk_out, off_out, bytes);
    if (r) {
        return r;
    }
    return bdrv_co_copy_range(blk_in->root, off_in,
                              blk_out->root, off_out,
                              bytes, read_flags, write_flags);
}

const BdrvChild *blk_root(BlockBackend *blk)
{
    return blk->root;
}