/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;
static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};
static void drive_info_del(DriveInfo *dinfo);
/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);
/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static const BdrvChildRole child_root = {
    .inherit_options = blk_root_inherit_options,
};
/*
 * Create a new BlockBackend with a reference count of one.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(Error **errp)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->root = bdrv_root_attach_child(bs, "root", &child_root);
    bs->blk = blk;
    return blk;
}
/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->root->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
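/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a caller could open an image read-write roughly as follows; the image
 * path and the local_err variable are hypothetical.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);   the QDict is already consumed
 *     }
 *     ...
 *     blk_unref(blk);
 */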
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}
static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}
int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}
/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}
/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}
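/*
 * Reference-counting sketch (editorial): a typical owner takes and drops
 * a reference around use; blk_delete() runs when the count hits zero.
 *
 *     blk_ref(blk);      refcnt: n -> n + 1
 *     ... use blk ...
 *     blk_unref(blk);    refcnt: n + 1 -> n, destroyed at 0
 */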
/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}
void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}
/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/*
 * Iterates over all BlockDriverStates which are attached to a BlockBackend.
 * This function is for use by bdrv_next().
 *
 * @bs must be NULL or a BDS that is attached to a BB.
 */
BlockDriverState *blk_next_root_bs(BlockDriverState *bs)
{
    BlockBackend *blk;

    if (bs) {
        assert(bs->blk);
        blk = bs->blk;
    } else {
        blk = NULL;
    }

    do {
        blk = blk_all_next(blk);
    } while (blk && !blk->root);

    return blk ? blk->root->bs : NULL;
}
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}
/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}
/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}
/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}
/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->root) {
        blk->root->bs->blk = NULL;
        bdrv_root_unref_child(blk->root);
    }
    assert(bs->blk == NULL);

    blk->root = bdrv_root_attach_child(bs, "root", &child_root);
    bs->blk = blk;
}
/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}
/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}
/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    assert(blk->root->bs->blk == blk);

    notifier_list_notify(&blk->remove_bs_notifiers, blk);

    blk_update_root_state(blk);

    blk->root->bs->blk = NULL;
    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}
/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->root && !bs->blk);
    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root);
    bs->blk = blk;

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
}
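/*
 * Medium-change sketch (editorial assumption, modelled on blockdev.c-style
 * callers): the two functions above are paired to swap the BDS tree under
 * a BlockBackend.
 *
 *     blk_remove_bs(blk);            root state saved via
 *                                    blk_update_root_state()
 *     blk_insert_bs(blk, new_bs);    takes its own bdrv_ref()
 *     bdrv_unref(new_bs);            drop the caller's reference
 */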
/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}
/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}
/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}
/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
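/*
 * Minimal hook-up sketch (editorial; the my_* names are hypothetical,
 * the BlockDevOps fields are those used elsewhere in this file):
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .change_media_cb = my_change_media_cb,
 *         .is_tray_open    = my_is_tray_open,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_ops, my_device_state);
 */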
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}
/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}
/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}
/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}
/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}
/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}
void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}
void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}
void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}
void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}
static int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                                      unsigned int bytes, QEMUIOVector *qiov,
                                      BdrvRequestFlags flags)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_do_preadv(blk_bs(blk), offset, bytes, qiov, flags);
}
static int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                       unsigned int bytes, QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_do_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
}
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;
static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
                              rwco->qiov, rwco->flags);
}
static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
                               rwco->qiov, rwco->flags);
}
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    AioContext *aio_context;
    QEMUIOVector qiov;
    struct iovec iov;
    Coroutine *co;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, &rwco);

    aio_context = blk_get_aio_context(blk);
    while (rwco.ret == NOT_DONE) {
        aio_poll(aio_context, true);
    }

    return rwco.ret;
}
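/*
 * Editorial note: this is the usual QEMU idiom for emulating a synchronous
 * request on top of a coroutine entry point: enter the coroutine, then
 * aio_poll() the backend's AioContext until the entry function replaces
 * the NOT_DONE sentinel in rwco.ret with the real return value.
 */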
static int blk_rw(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                  int nb_sectors, CoroutineEntry co_entry,
                  BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return blk_prw(blk, sector_num << BDRV_SECTOR_BITS, buf,
                   nb_sectors << BDRV_SECTOR_BITS, co_entry, flags);
}
int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    return blk_rw(blk, sector_num, buf, nb_sectors, blk_read_entry, 0);
}
int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    BlockDriverState *bs = blk_bs(blk);
    bool enabled;
    int ret;

    ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = blk_read(blk, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    return blk_rw(blk, sector_num, (uint8_t*) buf, nb_sectors,
                  blk_write_entry, 0);
}
int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    return blk_rw(blk, sector_num, NULL, nb_sectors, blk_write_entry,
                  BDRV_REQ_ZERO_WRITE);
}
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}
typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;        /* request length; rwco.qiov may be null (zero writes) */
    QEMUBH *bh;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->bh) {
        assert(acb->has_returned);
        qemu_bh_delete(acb->bh);
    }
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}
static void blk_aio_complete_bh(void *opaque)
{
    blk_aio_complete(opaque);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    /* Carry the length explicitly: zero-write requests have no qiov to
     * take it from.  (Bug fix; the original derived it from qiov->size.) */
    acb->bytes = bytes;
    acb->bh = NULL;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry);
    qemu_coroutine_enter(co, acb);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh, acb);
        qemu_bh_schedule(acb->bh);
    }

    return &acb->common;
}
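/*
 * Editorial note on the two completion paths above: if the coroutine
 * yielded, blk_aio_complete() runs later from the entry function; if the
 * request completed without yielding, completion is deferred to a bottom
 * half so the caller's callback can never fire before blk_aio_prwv() has
 * returned its BlockAIOCB.
 */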
static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}
static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS,
                        nb_sectors << BDRV_SECTOR_BITS, NULL,
                        blk_aio_write_entry, BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_prw(blk, offset, (void*) buf, count, blk_write_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}
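/*
 * Byte-granularity usage sketch (editorial): both helpers return the byte
 * count on success and a negative errno on failure.
 *
 *     uint8_t buf[512];
 *
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         ... handle read error ...
 *     }
 *     if (blk_pwrite(blk, 0, buf, sizeof(buf)) < 0) {
 *         ... handle write error ...
 *     }
 */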
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}
int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS,
                        nb_sectors << BDRV_SECTOR_BITS, iov,
                        blk_aio_read_entry, 0, cb, opaque);
}
BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
    }

    return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS,
                        nb_sectors << BDRV_SECTOR_BITS, iov,
                        blk_aio_write_entry, 0, cb, opaque);
}
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk_bs(blk), cb, opaque);
}
BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk_bs(blk), sector_num, nb_sectors, cb, opaque);
}
void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}
void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}
int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk_bs(blk), reqs, num_reqs);
}
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk_bs(blk), req, buf);
}
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
}
int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
}
int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}
int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk_bs(blk));
}
void blk_drain(BlockBackend *blk)
{
    if (blk_bs(blk)) {
        bdrv_drain(blk_bs(blk));
    }
}
void blk_drain_all(void)
{
    bdrv_drain_all();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}
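/*
 * Editorial sketch of the intended device-model call sequence (see
 * blk_error_action() below): when a request fails with -ret,
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *     blk_error_action(blk, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... queue the request so it can be retried after "cont" ...
 *     }
 */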
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}
int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}
int blk_enable_write_cache(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_enable_write_cache(bs);
    } else {
        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
    }
}
void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_set_enable_write_cache(bs, wce);
    } else {
        if (wce) {
            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
        } else {
            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
        }
    }
}
void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}
bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}
bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}
void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}
void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }
}
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}
int blk_get_max_transfer_length(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}
int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}
void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}
void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}
void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}
void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_set_aio_context(bs, new_context);
    }
}
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}
void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}
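/*
 * Notifier usage sketch (editorial; my_bs_removed is hypothetical):
 *
 *     static void my_bs_removed(Notifier *n, void *data)
 *     {
 *         BlockBackend *blk = data;
 *         ...
 *     }
 *
 *     static Notifier remove_notifier = { .notify = my_bs_removed };
 *     blk_add_remove_bs_notifier(blk, &remove_notifier);
 */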
void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}
void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return blk_co_pwritev(blk, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, NULL,
                          BDRV_REQ_ZERO_WRITE);
}
int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk_bs(blk), sector_num, buf, nb_sectors);
}
int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk_bs(blk), offset);
}
int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk_bs(blk), sector_num, nb_sectors);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
}
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}
int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->root->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->root->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}
/*
 * Applies the information in the root state to the given BlockDriverState.
 * This does not include the flags which have to be specified for bdrv_open(),
 * use blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
    }
}
/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
int blk_flush_all(void)
{
    BlockBackend *blk = NULL;
    int result = 0;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);
        int ret;

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk)) {
            ret = blk_flush(blk);
            if (ret < 0 && !result) {
                result = ret;
            }
        }
        aio_context_release(aio_context);
    }

    return result;
}