/*
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
#include "qemu/id.h"
#include "trace.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};
static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}

static void blk_root_drained_begin(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);
static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}
static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,

    .drained_begin      = blk_root_drained_begin,
    .drained_end        = blk_root_drained_end,
};
/*
 * Create a new BlockBackend with a reference count of one.
 */
BlockBackend *blk_new(void)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk_set_enable_write_cache(blk, true);

    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
    qemu_co_queue_init(&blk->public.throttled_reqs[1]);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);

    return blk;
}
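
/*
 * Example: the typical lifecycle (an illustrative sketch, not code used
 * in this file) pairs blk_new() with blk_unref():
 *
 *     BlockBackend *blk = blk_new();
 *     ...
 *     blk_unref(blk);
 */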
/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new();
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);

    return blk;
}
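
/*
 * Example (an illustrative sketch; the filename and flags are
 * hypothetical):
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("disk.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */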
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}
static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}
int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}
/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}
/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}
void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}
/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs;

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (bs) {
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    return bs;
}
BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };

    return bdrv_next(it);
}
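
/*
 * Example: visiting every top-level BlockDriverState (a sketch of the
 * loop shape callers use with this iterator):
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ...
 *     }
 */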
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}
/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}
/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}
/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}
/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}
/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}
/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}
/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_detach_aio_context(&blk->public.throttle_timers);
    }

    blk_update_root_state(blk);

    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}
/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_attach_aio_context(
            &blk->public.throttle_timers, bdrv_get_aio_context(bs));
    }
}
/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}
/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_enable_write_cache(blk, false);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
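
/*
 * Example: a device model registering its callbacks (a sketch;
 * my_dev_ops, my_resize_cb and the opaque pointer s are hypothetical):
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .resize_cb = my_resize_cb,
 *     };
 *     ...
 *     blk_set_dev_ops(blk, &my_dev_ops, s);
 */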
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}
static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load);
}
/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}
/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}
/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}
/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}
void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}
void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}
void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;

    trace_blk_co_preadv(blk, blk_bs(blk), offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, false);
    }

    return bdrv_co_preadv(blk_bs(blk), offset, bytes, qiov, flags);
}
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;

    trace_blk_co_pwritev(blk, blk_bs(blk), offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    return bdrv_co_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
}
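
/*
 * Example: a read issued from coroutine context (a sketch; the
 * QEMUIOVector setup is elided):
 *
 *     ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0);
 *     if (ret < 0) {
 *         ...
 *     }
 */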
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
                              rwco->qiov, rwco->flags);
}
static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
                               rwco->qiov, rwco->flags);
}
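
/*
 * Emulate a synchronous request on top of the coroutine-based I/O path:
 * run @co_entry in a coroutine and poll the backend's AioContext until
 * the coroutine replaces the NOT_DONE sentinel in rwco.ret with the real
 * return value.
 */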
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    AioContext *aio_context;
    QEMUIOVector qiov;
    struct iovec iov;
    Coroutine *co;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, &rwco);

    aio_context = blk_get_aio_context(blk);
    while (rwco.ret == NOT_DONE) {
        aio_poll(aio_context, true);
    }

    return rwco.ret;
}
*blk
, int64_t offset
, uint8_t *buf
,
855 ret
= blk_check_byte_request(blk
, offset
, count
);
860 blk_root_drained_begin(blk
->root
);
861 ret
= blk_pread(blk
, offset
, buf
, count
);
862 blk_root_drained_end(blk
->root
);
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int count, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, count, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}
typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
    QEMUBH *bh;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size         = sizeof(BlkAioEmAIOCB),
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->bh) {
        assert(acb->has_returned);
        qemu_bh_delete(acb->bh);
    }
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    blk_aio_complete(opaque);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->bh = NULL;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, acb);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh, acb);
        qemu_bh_schedule(acb->bh);
    }

    return &acb->common;
}
)
962 BlkAioEmAIOCB
*acb
= opaque
;
963 BlkRwCo
*rwco
= &acb
->rwco
;
965 assert(rwco
->qiov
->size
== acb
->bytes
);
966 rwco
->ret
= blk_co_preadv(rwco
->blk
, rwco
->offset
, acb
->bytes
,
967 rwco
->qiov
, rwco
->flags
);
968 blk_aio_complete(acb
);
static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
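
/*
 * Example: synchronous byte-based I/O from non-coroutine context (a
 * sketch; buffer sizing is elided). Both helpers return @count on
 * success, a negative errno on failure:
 *
 *     uint8_t buf[512];
 *     int ret = blk_pread(blk, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         ...
 *     }
 */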
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk_bs(blk), cb, opaque);
}
BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk_bs(blk), sector_num, nb_sectors, cb, opaque);
}
void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk_bs(blk), req, buf);
}
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
}
int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
}
int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk_bs(blk));
}
void blk_drain(BlockBackend *blk)
{
    if (blk_bs(blk)) {
        bdrv_drain(blk_bs(blk));
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}
int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}
int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}
void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}
bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}
void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }
}
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}
*blk
)
1319 return blk
->root
->bs
->bl
.max_iov
;
1322 void blk_set_guest_block_size(BlockBackend
*blk
, int align
)
1324 blk
->guest_block_size
= align
;
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        if (blk->public.throttle_state) {
            throttle_timers_detach_aio_context(&blk->public.throttle_timers);
        }
        bdrv_set_aio_context(bs, new_context);
        if (blk->public.throttle_state) {
            throttle_timers_attach_aio_context(&blk->public.throttle_timers,
                                               new_context);
        }
    }
}
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}
void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int count, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, count, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk_bs(blk), sector_num, buf, nb_sectors);
}
int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk_bs(blk), offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk_bs(blk), sector_num, nb_sectors);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}
/*
 * Applies the information in the root state to the given BlockDriverState. This
 * does not include the flags which have to be specified for bdrv_open(), use
 * blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }

    return 0;
}
int blk_flush_all(void)
{
    BlockBackend *blk = NULL;
    int result = 0;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);
        int ret;

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk)) {
            ret = blk_flush(blk);
            if (ret < 0 && !result) {
                result = ret;
            }
        }
        aio_context_release(aio_context);
    }

    return result;
}
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(blk, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    assert(blk->public.throttle_state);
    bdrv_drained_begin(blk_bs(blk));
    throttle_group_unregister_blk(blk);
    bdrv_drained_end(blk_bs(blk));
}
/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_state);
    throttle_group_register_blk(blk, group);
}
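
/*
 * Example: putting a backend into a throttle group and applying a limit
 * (a sketch; the group name and config values are made up):
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */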
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(blk), group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (blk->public.io_limits_disabled++ == 0) {
        throttle_group_restart_blk(blk);
    }
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    assert(blk->public.io_limits_disabled);
    --blk->public.io_limits_disabled;
}