/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return blk;
}

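/*
 * Usage sketch (illustrative, not from this file): create a named backend
 * and release it again.  The name "drive0" and the surrounding error
 * handling are hypothetical caller code.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new("drive0", &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ...
 *     blk_unref(blk);
 */
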
/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

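/*
 * Usage sketch (illustrative): open an image read-write through a fresh
 * backend.  The name and filename are hypothetical; note that @options
 * (here NULL) is consumed even on failure, so only the error needs
 * cleaning up.
 *
 *     BlockBackend *blk = blk_new_open("drive0", "test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */
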
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

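/*
 * Reference-counting sketch (illustrative): blk_new() returns the first
 * reference; each additional blk_ref() must be paired with a blk_unref(),
 * and the last blk_unref() triggers blk_delete().
 *
 *     blk_ref(blk);       // e.g. while a device model holds on to @blk
 *     ...
 *     blk_unref(blk);     // may destroy @blk if this was the last reference
 */
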
void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->bs) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument. For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    assert(blk->bs->blk == blk);

    notifier_list_notify(&blk->remove_bs_notifiers, blk);

    blk_update_root_state(blk);

    blk->bs->blk = NULL;
    bdrv_unref(blk->bs);
    blk->bs = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->bs && !bs->blk);
    bdrv_ref(bs);
    blk->bs = bs;
    bs->blk = blk;

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

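/*
 * Usage sketch (illustrative): a device model claiming a backend.  "mydev"
 * is hypothetical caller state.
 *
 *     if (blk_attach_dev(blk, mydev) < 0) {
 *         ... some other device model already owns @blk (-EBUSY)
 *     }
 *     ...
 *     blk_detach_dev(blk, mydev);  // drops the reference taken by attach
 */
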
/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}

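/*
 * Usage sketch (illustrative): a device model wiring up its callbacks.
 * The "my_*" identifiers are hypothetical; change_media_cb and resize_cb
 * are the BlockDevOps members this file invokes below.
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb = my_change_media_cb,
 *         .resize_cb       = my_resize_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_block_ops, mydev);
 */
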
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

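/*
 * Example (illustrative): a drive configured with rerror=report and
 * werror=report never satisfies this predicate, even after
 * blk_iostatus_enable(), because neither error policy can stop the VM;
 * werror=stop, rerror=stop or werror=enospc does.
 */
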
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (blk->bs && blk->bs->job) {
            block_job_iostatus_reset(blk->bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

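/*
 * Worked example (illustrative): with a 1 MiB backend (len == 1048576)
 * and allow_write_beyond_eof clear, offset == 1048000 with size == 1024
 * fails with -EIO because len - offset == 576 < size, whereas
 * offset == 1048576 with size == 0 still passes.
 */
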
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk->bs) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk->bs, nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

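/*
 * Usage sketch (illustrative): an asynchronous single-sector read.
 * "my_read_cb", "qiov", "buf" and "mydev" are hypothetical.
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         ... ret is 0 on success, a negative errno on failure
 *     }
 *
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, BDRV_SECTOR_SIZE);
 *     blk_aio_readv(blk, 0, &qiov, 1, my_read_cb, mydev);
 */
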
BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_drain(blk->bs);
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

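/*
 * Example (illustrative): with werror=enospc, a failing write maps to
 * BLOCK_ERROR_ACTION_STOP only when the error is ENOSPC; any other write
 * error yields BLOCK_ERROR_ACTION_REPORT.
 */
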
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

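/*
 * Usage sketch (illustrative): how a device model typically reacts to a
 * failed request, where "ret" is a negative errno from e.g. blk_pread():
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *     blk_error_action(blk, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... keep the request around so it can be retried on "cont"
 *     }
 */
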
int blk_is_read_only(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_is_read_only(blk->bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    if (!blk->bs) {
        return 0;
    }

    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_enable_write_cache(blk->bs);
    } else {
        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
    }
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    if (blk->bs) {
        bdrv_set_enable_write_cache(blk->bs, wce);
    } else {
        if (wce) {
            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
        } else {
            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
        }
    }
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    if (!blk->bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    if (blk->bs) {
        bdrv_lock_medium(blk->bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    if (blk->bs) {
        bdrv_eject(blk->bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_flags(blk->bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    if (blk->bs) {
        return blk->bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk->bs : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    if (!blk->bs) {
        return false;
    }

    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock(blk->bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_block_all(blk->bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock_all(blk->bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    if (blk->bs) {
        bdrv_set_aio_context(blk->bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    if (blk->bs) {
        bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    if (blk->bs) {
        bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_plug(blk->bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_unplug(blk->bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk->bs, geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->bs);

    blk->root_state.open_flags    = blk->bs->open_flags;
    blk->root_state.read_only     = blk->bs->read_only;
    blk->root_state.detect_zeroes = blk->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}

/*
 * Applies the information in the root state to the given BlockDriverState. This
 * does not include the flags which have to be specified for bdrv_open(), use
 * blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
    }
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

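/*
 * Worked example (illustrative): a root state saved from a writable BDS
 * opened with BDRV_O_CACHE_WB has read_only == false, so this returns
 * BDRV_O_RDWR | BDRV_O_CACHE_WB (plus any other saved flags except
 * BDRV_O_RDWR itself, which is recomputed from read_only).
 */
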
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    return bdrv_commit_all();
}