block: Move I/O throttling configuration functions to BlockBackend
[qemu/ar7.git] / include/sysemu/block-backend.h
/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#ifndef BLOCK_BACKEND_H
#define BLOCK_BACKEND_H

#include "qemu/iov.h"
#include "block/throttle-groups.h"

/*
 * TODO Have to include block/block.h for a bunch of block layer
 * types.  Unfortunately, this pulls in the whole BlockDriverState
 * API, which we don't want used by many BlockBackend users.  Some of
 * the types belong here, and the rest should be split into a common
 * header and one for the BlockDriverState API.
 */
#include "block/block.h"

/* Callbacks for block device models */
typedef struct BlockDevOps {
    /*
     * Runs when the virtual media changed (monitor commands eject, change).
     * Argument load is true on load and false on eject.
     * Beware: doesn't run when a host device's physical media
     * changes.  Sure would be useful if it did.
     * Device models with removable media must implement this callback.
     */
    void (*change_media_cb)(void *opaque, bool load);
    /*
     * Runs when an eject request is issued from the monitor, the tray
     * is closed, and the medium is locked.
     * Device models that do not implement is_medium_locked will not need
     * this callback.  Device models that can lock the medium or tray might
     * want to implement the callback and unlock the tray when "force" is
     * true, even if they do not support eject requests.
     */
    void (*eject_request_cb)(void *opaque, bool force);
    /*
     * Is the virtual tray open?
     * Device models implement this only when the device has a tray.
     */
    bool (*is_tray_open)(void *opaque);
    /*
     * Is the virtual medium locked into the device?
     * Device models implement this only when the device has such a lock.
     */
    bool (*is_medium_locked)(void *opaque);
    /*
     * Runs when the size changed (e.g. monitor command block_resize).
     */
    void (*resize_cb)(void *opaque);
} BlockDevOps;
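
/*
 * Example: how a device model with removable media might hook into
 * BlockDevOps.  This is an illustrative sketch only; the device state
 * type and callback bodies below are hypothetical, not part of QEMU.
 *
 *     typedef struct MyCDState {
 *         BlockBackend *blk;
 *         bool media_present;
 *         bool tray_open;
 *     } MyCDState;
 *
 *     static void my_cd_change_media_cb(void *opaque, bool load)
 *     {
 *         MyCDState *s = opaque;
 *         s->media_present = load;    // load == false means eject
 *     }
 *
 *     static bool my_cd_is_tray_open(void *opaque)
 *     {
 *         MyCDState *s = opaque;
 *         return s->tray_open;
 *     }
 *
 *     static const BlockDevOps my_cd_block_ops = {
 *         .change_media_cb = my_cd_change_media_cb,
 *         .is_tray_open    = my_cd_is_tray_open,
 *     };
 *
 *     // during device realize, after attaching the device:
 *     blk_set_dev_ops(s->blk, &my_cd_block_ops, s);
 */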

/* This struct is embedded in (the private) BlockBackend struct and contains
 * fields that must be public.  This is in particular for QLIST_ENTRY() and
 * friends so that BlockBackends can be kept in lists outside block-backend.c */
typedef struct BlockBackendPublic {
    /* I/O throttling.
     * throttle_state tells us if this BlockBackend has I/O limits configured;
     * io_limits_disabled is nonzero while enforcement of those limits is
     * temporarily suspended. */
    CoQueue      throttled_reqs[2];
    unsigned int io_limits_disabled;

    /* The following fields are protected by the ThrottleGroup lock.
     * See the ThrottleGroup documentation for details. */
    ThrottleState *throttle_state;
    ThrottleTimers throttle_timers;
    unsigned       pending_reqs[2];
    QLIST_ENTRY(BlockBackendPublic) round_robin;
} BlockBackendPublic;

BlockBackend *blk_new(Error **errp);
BlockBackend *blk_new_with_bs(Error **errp);
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp);
int blk_get_refcnt(BlockBackend *blk);
void blk_ref(BlockBackend *blk);
void blk_unref(BlockBackend *blk);
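
/*
 * Example: typical BlockBackend lifecycle with blk_new_open() and
 * reference counting.  Illustrative sketch; the image path and flags
 * are placeholders, and error handling is abbreviated.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     blk = blk_new_open("/path/to/image.qcow2", NULL, NULL,
 *                        BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *
 *     // ... use the backend; blk_ref()/blk_unref() adjust the refcount ...
 *
 *     blk_unref(blk);    // drops the reference from blk_new_open()
 */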

void blk_remove_all_bs(void);
const char *blk_name(BlockBackend *blk);
BlockBackend *blk_by_name(const char *name);
BlockBackend *blk_next(BlockBackend *blk);
BlockDriverState *blk_next_root_bs(BlockDriverState *bs);
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
void monitor_remove_blk(BlockBackend *blk);

BlockBackendPublic *blk_get_public(BlockBackend *blk);
BlockBackend *blk_by_public(BlockBackendPublic *public);
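
/*
 * Example: converting between a BlockBackend and its embedded public
 * struct.  blk_by_public() is the inverse of blk_get_public(), which
 * is what lets throttling code keep BlockBackends on ThrottleGroup
 * lists while holding only BlockBackendPublic pointers.  Sketch:
 *
 *     BlockBackendPublic *blkp = blk_get_public(blk);
 *     assert(blk_by_public(blkp) == blk);    // round trip holds
 */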

BlockDriverState *blk_bs(BlockBackend *blk);
void blk_remove_bs(BlockBackend *blk);
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs);

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
void blk_iostatus_enable(BlockBackend *blk);
bool blk_iostatus_is_enabled(const BlockBackend *blk);
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
void blk_iostatus_disable(BlockBackend *blk);
void blk_iostatus_reset(BlockBackend *blk);
void blk_iostatus_set_err(BlockBackend *blk, int error);
int blk_attach_dev(BlockBackend *blk, void *dev);
void blk_attach_dev_nofail(BlockBackend *blk, void *dev);
void blk_detach_dev(BlockBackend *blk, void *dev);
void *blk_get_attached_dev(BlockBackend *blk);
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count);
int blk_write_zeroes(BlockBackend *blk, int64_t offset,
                     int count, BdrvRequestFlags flags);
BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t offset,
                                 int count, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque);
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count);
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags);
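
/*
 * Example: synchronous I/O through a BlockBackend.  Illustrative
 * sketch; the buffer size and offsets are arbitrary.  Note that
 * offsets and counts here are in bytes, not sectors.
 *
 *     uint8_t buf[512];
 *     int ret;
 *
 *     ret = blk_pread(blk, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         // negative errno on failure
 *     }
 *
 *     ret = blk_pwrite(blk, 4096, buf, sizeof(buf), 0);
 *     // a flags value of 0 means no BdrvRequestFlags are set
 */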

int64_t blk_getlength(BlockBackend *blk);
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
int64_t blk_nb_sectors(BlockBackend *blk);
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque);
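
/*
 * Example: asynchronous I/O with a completion callback.  Illustrative
 * sketch; the callback and its opaque payload are hypothetical.
 *
 *     static void my_read_complete(void *opaque, int ret)
 *     {
 *         // ret is 0 on success, negative errno on failure
 *     }
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     blk_aio_preadv(blk, offset, &qiov, 0, my_read_complete, s);
 */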
BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque);
void blk_aio_cancel(BlockAIOCB *acb);
void blk_aio_cancel_async(BlockAIOCB *acb);
int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs);
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque);
int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors);
int blk_co_flush(BlockBackend *blk);
int blk_flush(BlockBackend *blk);
int blk_flush_all(void);
int blk_commit_all(void);
void blk_drain(BlockBackend *blk);
void blk_drain_all(void);
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error);
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error);
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error);
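
/*
 * Example: how a device model might use the error action helpers in
 * its request completion path.  Sketch under the assumption that the
 * caller can queue the request for retry (e.g. after the VM resumes):
 *
 *     static void my_dev_handle_rw_error(BlockBackend *blk, int error,
 *                                        bool is_read)
 *     {
 *         BlockErrorAction action;
 *
 *         action = blk_get_error_action(blk, is_read, error);
 *         if (action == BLOCK_ERROR_ACTION_STOP) {
 *             // queue the request for retry (device-specific)
 *         }
 *         blk_error_action(blk, action, is_read, error);
 *     }
 */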
int blk_is_read_only(BlockBackend *blk);
int blk_is_sg(BlockBackend *blk);
int blk_enable_write_cache(BlockBackend *blk);
void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
void blk_invalidate_cache(BlockBackend *blk, Error **errp);
bool blk_is_inserted(BlockBackend *blk);
bool blk_is_available(BlockBackend *blk);
void blk_lock_medium(BlockBackend *blk, bool locked);
void blk_eject(BlockBackend *blk, bool eject_flag);
int blk_get_flags(BlockBackend *blk);
int blk_get_max_transfer_length(BlockBackend *blk);
int blk_get_max_iov(BlockBackend *blk);
void blk_set_guest_block_size(BlockBackend *blk, int align);
void *blk_try_blockalign(BlockBackend *blk, size_t size);
void *blk_blockalign(BlockBackend *blk, size_t size);
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
void blk_op_block_all(BlockBackend *blk, Error *reason);
void blk_op_unblock_all(BlockBackend *blk, Error *reason);
AioContext *blk_get_aio_context(BlockBackend *blk);
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context);
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);
void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque);
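
/*
 * Example: tracking AioContext changes, as dataplane-style users do.
 * Illustrative sketch; the notifier bodies are hypothetical.
 *
 *     static void my_attached_aio_context(AioContext *new_context,
 *                                         void *opaque)
 *     {
 *         // re-register event handlers in new_context
 *     }
 *
 *     static void my_detach_aio_context(void *opaque)
 *     {
 *         // drop event handlers from the old context
 *     }
 *
 *     blk_add_aio_context_notifier(blk, my_attached_aio_context,
 *                                  my_detach_aio_context, s);
 *     // ... and symmetrically, with identical arguments, later:
 *     blk_remove_aio_context_notifier(blk, my_attached_aio_context,
 *                                     my_detach_aio_context, s);
 */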
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify);
void blk_io_plug(BlockBackend *blk);
void blk_io_unplug(BlockBackend *blk);
BlockAcctStats *blk_get_stats(BlockBackend *blk);
BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
void blk_update_root_state(BlockBackend *blk);
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs);
int blk_get_open_flags_from_root_state(BlockBackend *blk);

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque);
int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t offset,
                                     int count, BdrvRequestFlags flags);
int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors);
int blk_truncate(BlockBackend *blk, int64_t offset);
int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors);
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size);
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz);
int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo);
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret);

void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
void blk_io_limits_disable(BlockBackend *blk);
void blk_io_limits_enable(BlockBackend *blk, const char *group);
void blk_io_limits_update_group(BlockBackend *blk, const char *group);
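
/*
 * Example: configuring I/O limits with the functions this commit moves
 * to BlockBackend.  Illustrative sketch; the group name and the limit
 * value are placeholders.
 *
 *     ThrottleConfig cfg;
 *
 *     memset(&cfg, 0, sizeof(cfg));
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 1000;    // ~1000 IOPS
 *
 *     blk_io_limits_enable(blk, "group0");    // join a throttle group
 *     blk_set_io_limits(blk, &cfg);           // apply the limits
 *
 *     // ... later, to drop throttling entirely:
 *     blk_io_limits_disable(blk);
 */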
#endif