blockdev: Remove blk_hide_on_behalf_of_hmp_drive_del()
[qemu/ar7.git] / block / block-backend.c
blob 68f3662e7d621c8eaa9268febd32eba7a5293557

/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor, i.e. the ones that
 * blk_next() iterates through */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

/*
 * Create a new BlockBackend with a reference count of one.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(Error **errp)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
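
/*
 * Example (hypothetical caller, not part of this file): open an image
 * read/write with a fresh backend.  "disk.qcow2" is a placeholder path.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("disk.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);  // open failed; blk already freed
 *     }
 */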

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->bs) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}
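
/*
 * Usage sketch (hypothetical caller): code that stores a BlockBackend
 * pointer beyond the current call takes its own reference and drops it
 * when done; the count reaching zero triggers blk_delete().
 *
 *     blk_ref(blk);        // keep blk alive while we hold the pointer
 *     ...
 *     blk_unref(blk);      // may destroy blk if this was the last ref
 */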

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->bs) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * It is an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}
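
/*
 * Example (hypothetical): enumerate the monitor-owned backends and look
 * one up by name; "virtio0" is a placeholder ID.
 *
 *     BlockBackend *it = NULL;
 *     while ((it = blk_next(it)) != NULL) {
 *         printf("backend: %s\n", blk_name(it));
 *     }
 *     BlockBackend *named = blk_by_name("virtio0");  // NULL if absent
 */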

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk.
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    assert(blk->bs->blk == blk);

    notifier_list_notify(&blk->remove_bs_notifiers, blk);

    blk_update_root_state(blk);

    blk->bs->blk = NULL;
    bdrv_unref(blk->bs);
    blk->bs = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    assert(!blk->bs && !bs->blk);
    bdrv_ref(bs);
    blk->bs = bs;
    bs->blk = blk;

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
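
/*
 * Sketch of a device model registering its callbacks (hypothetical
 * device; the my_dev_* names are placeholders).  BlockDevOps is declared
 * in include/sysemu/block-backend.h.
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb  = my_dev_change_media_cb,
 *         .eject_request_cb = my_dev_eject_request_cb,
 *         .is_tray_open     = my_dev_is_tray_open,
 *         .resize_cb        = my_dev_resize_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_block_ops, my_dev_state);
 */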

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
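
/*
 * Example (hypothetical): the iostatus machinery only reports once the
 * error policy is one that can stop the VM, e.g. werror=stop:
 *
 *     blk_set_on_error(blk, BLOCKDEV_ON_ERROR_REPORT,
 *                      BLOCKDEV_ON_ERROR_STOP);
 *     blk_iostatus_enable(blk);
 *     assert(blk_iostatus_is_enabled(blk));
 */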

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (blk->bs && blk->bs->job) {
            block_job_iostatus_reset(blk->bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}
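
/*
 * The sector-based check reduces to the byte-based one above: e.g. a
 * request for 16 sectors at sector 2048 is checked as 8192 bytes at
 * byte offset 1048576 (BDRV_SECTOR_SIZE is 512).
 */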

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}
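
/*
 * Example (hypothetical caller): synchronous byte-level I/O.  Both
 * helpers return a negative errno on failure.
 *
 *     uint8_t buf[512];
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         ... handle read error ...
 *     }
 *     if (blk_pwrite(blk, 4096, buf, sizeof(buf)) < 0) {
 *         ... handle write error ...
 *     }
 */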

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk->bs) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk->bs, nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}
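
/*
 * Example (hypothetical): asynchronous read of the first sector;
 * my_read_cb is a placeholder completion callback that receives 0 or a
 * negative errno in @ret.
 *
 *     static void my_read_cb(void *opaque, int ret) { ... }
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = {
 *         .iov_base = buf,
 *         .iov_len  = BDRV_SECTOR_SIZE,
 *     };
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     blk_aio_readv(blk, 0, &qiov, 1, my_read_cb, NULL);
 */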

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_drain(blk->bs);
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}
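
/*
 * Example: with rerror/werror set to BLOCKDEV_ON_ERROR_ENOSPC
 * (werror=enospc on the command line), a failed write with
 * error == ENOSPC maps to BLOCK_ERROR_ACTION_STOP (pause the VM),
 * while any other errno maps to BLOCK_ERROR_ACTION_REPORT.
 */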

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

int blk_is_read_only(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_is_read_only(blk->bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    if (!blk->bs) {
        return 0;
    }

    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_enable_write_cache(blk->bs);
    } else {
        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
    }
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    if (blk->bs) {
        bdrv_set_enable_write_cache(blk->bs, wce);
    } else {
        if (wce) {
            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
        } else {
            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
        }
    }
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    if (!blk->bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    if (blk->bs) {
        bdrv_lock_medium(blk->bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    if (blk->bs) {
        bdrv_eject(blk->bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_flags(blk->bs);
    } else {
        return blk->root_state.open_flags;
    }
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    if (blk->bs) {
        return blk->bs->bl.max_transfer_length;
    } else {
        return 0;
    }
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk->bs : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    if (!blk->bs) {
        return false;
    }

    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock(blk->bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_block_all(blk->bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    if (blk->bs) {
        bdrv_op_unblock_all(blk->bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    if (blk->bs) {
        bdrv_set_aio_context(blk->bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    if (blk->bs) {
        bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    if (blk->bs) {
        bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_plug(blk->bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    if (blk->bs) {
        bdrv_io_unplug(blk->bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}
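
/*
 * Sketch (hypothetical): coroutine_fn callers such as block jobs can
 * zero sectors without a bounce buffer; here 16 sectors at sector 0:
 *
 *     static void coroutine_fn my_zero_co(void *opaque)
 *     {
 *         BlockBackend *blk = opaque;
 *         int ret = blk_co_write_zeroes(blk, 0, 16, 0);
 *         ...
 *     }
 */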

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk->bs, geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->bs);

    blk->root_state.open_flags    = blk->bs->open_flags;
    blk->root_state.read_only     = blk->bs->read_only;
    blk->root_state.detect_zeroes = blk->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    }
    if (blk->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;
    }
}

/*
 * Applies the information in the root state to the given BlockDriverState.
 * This does not include the flags which have to be specified for bdrv_open();
 * use blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
{
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
    }
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}
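
/*
 * Example: if the root state was captured read-only while open_flags
 * contained BDRV_O_RDWR | BDRV_O_CACHE_WB, the computed flags keep
 * BDRV_O_CACHE_WB but drop BDRV_O_RDWR, because read_only overrides
 * the stored flag.
 */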

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    return bdrv_commit_all();
}