/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}

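/*
 * Illustrative usage (hypothetical caller, not part of this file): create
 * a backend and drop the reference again; error handling follows the
 * usual Error API:
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new("drive0", &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ...
 *     blk_unref(blk);
 */
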
/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

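/*
 * Illustrative usage (hypothetical caller): open an image read/write.
 * Note that @options is consumed even on failure, so the caller must not
 * QDECREF() it afterwards:
 *
 *     BlockBackend *blk = blk_new_open("drive0", "/path/to/image", NULL,
 *                                      NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */
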
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument.  For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    blk_update_root_state(blk);

    blk->bs->blk = NULL;
    bdrv_unref(blk->bs);
    blk->bs = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->bs && !bs->blk);
    bdrv_ref(bs);
    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

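/*
 * Illustrative pairing (hypothetical device model): attach on realize and
 * detach on unrealize; a second attach fails with -EBUSY:
 *
 *     if (blk_attach_dev(blk, dev) < 0) {
 *         error_setg(errp, "Drive is already in use");
 *         return;
 *     }
 *     ...
 *     blk_detach_dev(blk, dev);
 */
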
/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}

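/*
 * Typical registration (sketch; the my_dev_* names are hypothetical),
 * done by a device model right after blk_attach_dev():
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb = my_dev_change_media_cb,
 *         .eject_request_cb = my_dev_eject_request_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_block_ops, dev);
 */
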
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (blk->bs && blk->bs->job) {
            block_job_iostatus_reset(blk->bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

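/*
 * Worked example for the guards above: with BDRV_SECTOR_SIZE == 512,
 * sector_num is capped at INT64_MAX / 512 so that
 * sector_num * BDRV_SECTOR_SIZE cannot overflow int64_t, and nb_sectors
 * is capped at INT_MAX / 512 so the resulting byte count still passes the
 * size > INT_MAX test in blk_check_byte_request().
 */
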
int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
                                     void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

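/*
 * Illustrative synchronous byte-level I/O (hypothetical caller): read a
 * 512-byte header from the start of the backend; a negative return value
 * is an errno-style error code:
 *
 *     uint8_t header[512];
 *     int ret = blk_pread(blk, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */
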
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk->bs) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk->bs, nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

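/*
 * Illustrative asynchronous read (hypothetical caller, not part of this
 * file); the completion callback runs in @blk's AioContext with an
 * errno-style return code:
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             ... handle or retry ...
 *         }
 *     }
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     blk_aio_readv(blk, sector_num, &qiov, nb_sectors, my_read_cb, opaque);
 */
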
BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_drain(blk->bs);
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

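/*
 * Typical device-model flow (sketch): translate a request's failure into
 * an action with blk_get_error_action(), act on it, then report it here
 * so the iostatus and the QMP event stay consistent:
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... queue the request so it can be retried after "cont" ...
 *     }
 *     blk_error_action(blk, action, is_read, -ret);
 */
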
int blk_is_read_only(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_is_read_only(blk->bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    if (!blk->bs) {
        return 0;
    }

    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_enable_write_cache(blk->bs);
    } else {
        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
    }
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    if (blk->bs) {
        bdrv_set_enable_write_cache(blk->bs, wce);
    } else {
        if (wce) {
            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
        } else {
            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
        }
    }
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    if (!blk->bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    if (blk->bs) {
        bdrv_lock_medium(blk->bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    if (blk->bs) {
        bdrv_eject(blk->bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_flags(blk->bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    if (blk->bs) {
        return blk->bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    if (!blk->bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return true;
    }

    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock(blk->bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_block_all(blk->bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock_all(blk->bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    if (blk->bs) {
        bdrv_set_aio_context(blk->bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    if (blk->bs) {
        bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    if (blk->bs) {
        bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    if (blk->bs) {
        bdrv_add_close_notifier(blk->bs, notify);
    }
}

void blk_io_plug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_plug(blk->bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_unplug(blk->bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk->bs, geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->bs);

    blk->root_state.open_flags    = blk->bs->open_flags;
    blk->root_state.read_only     = blk->bs->read_only;
    blk->root_state.detect_zeroes = blk->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}

/*
 * Applies the information in the root state to the given BlockDriverState.
 * This does not include the flags which have to be specified for bdrv_open();
 * use blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
    }
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

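/*
 * Sketch of the intended restore flow (hypothetical caller): when a new
 * medium is inserted, take the open flags from here, open the new BDS,
 * then apply the remaining root state:
 *
 *     int flags = blk_get_open_flags_from_root_state(blk);
 *     ... bdrv_open() the new BlockDriverState @bs with @flags ...
 *     blk_insert_bs(blk, bs);
 *     blk_apply_root_state(blk, bs);
 */
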
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}