/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_IO_H
#define BLOCK_IO_H

#include "block-common.h"
/*
 * I/O API functions. These functions are thread-safe, and therefore
 * can run in any thread as long as the thread has called
 * aio_context_acquire/release().
 *
 * These functions can only call functions from I/O and Common categories,
 * but can be invoked by GS, "I/O or GS" and I/O APIs.
 *
 * All functions in this category must use the macro
 * IO_CODE();
 * to catch when they are accidentally called by the wrong API.
 */
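
/*
 * Example (a minimal, hypothetical sketch): an I/O-category helper marks
 * itself with IO_CODE() as its first statement so misuse from the wrong
 * API category is caught; my_io_helper() is a made-up name:
 *
 *     static int my_io_helper(BlockDriverState *bs)
 *     {
 *         IO_CODE();
 *         return bdrv_is_read_only(bs) ? -EPERM : 0;
 *     }
 */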
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
                int64_t bytes);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t bytes);
/*
 * Efficiently zero a region of the disk image. Note that this is a regular
 * I/O request like read or write and should have a reasonable size. This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags);
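
/*
 * Example (hypothetical caller, a minimal sketch): zero a large range in
 * bounded chunks rather than one oversized request, per the note above.
 * zero_range() is a made-up name and the 1 MiB bound is purely illustrative:
 *
 *     int coroutine_fn zero_range(BdrvChild *child, int64_t off, int64_t len)
 *     {
 *         while (len > 0) {
 *             int64_t n = MIN(len, 1 * MiB);
 *             int ret = bdrv_co_pwrite_zeroes(child, off, n, 0);
 *             if (ret < 0) {
 *                 return ret;
 *             }
 *             off += n;
 *             len -= n;
 *         }
 *         return 0;
 *     }
 */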
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
void coroutine_fn bdrv_co_delete_file_noerr(BlockDriverState *bs);
/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
/* Ensure contents are flushed to disk. */
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);

int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);

int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            bool include_base, int64_t offset, int64_t bytes,
                            int64_t *pnum);
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes);
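
/*
 * Example (a minimal, hypothetical sketch): walk an image with
 * bdrv_block_status(), advancing by *pnum each iteration; the return
 * value carries the BDRV_BLOCK_ flags that apply to the next *pnum bytes:
 *
 *     int64_t offset = 0, total = bdrv_getlength(bs);
 *     while (offset < total) {
 *         int64_t pnum;
 *         int ret = bdrv_block_status(bs, offset, total - offset,
 *                                     &pnum, NULL, NULL);
 *         if (ret < 0) {
 *             break;
 *         }
 *         offset += pnum;
 *     }
 */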
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
                              Error **errp);
bool bdrv_is_read_only(BlockDriverState *bs);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
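
/*
 * Example (hypothetical driver code, a minimal sketch): a driver that
 * cannot write degrades to a read-only open via bdrv_apply_auto_read_only(),
 * failing if that is not allowed; the error message is illustrative:
 *
 *     ret = bdrv_apply_auto_read_only(bs, "writing not supported", errp);
 *     if (ret < 0) {
 *         return ret;
 *     }
 */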
bool bdrv_supports_compressed_writes(BlockDriverState *bs);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
                                          Error **errp);
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);
/*
 * Returns the alignment in bytes that is required so that no bounce buffer
 * is required throughout the stack
 */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
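
/*
 * Example (a minimal, hypothetical sketch): allocate a buffer aligned for
 * I/O on @bs with the non-aborting variant and release it with qemu_vfree():
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (!buf) {
 *         return -ENOMEM;
 *     }
 *     ret = bdrv_pread(child, offset, buf, len);
 *     qemu_vfree(buf);
 */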
void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)
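
/*
 * Example (a minimal sketch): fire a blkdebug event on a node's file child
 * before submitting a request, so blkdebug rules can act on it:
 *
 *     BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
 */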
/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);
/*
 * Move the current coroutine to the AioContext of @bs and return the old
 * AioContext of the coroutine. Increase bs->in_flight so that draining @bs
 * will wait for the operation to proceed until the corresponding
 * bdrv_co_leave().
 *
 * Consequently, you can't call drain inside a bdrv_co_enter/leave() section as
 * this will deadlock.
 */
AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs);

/*
 * Ends a section started by bdrv_co_enter(). Move the current coroutine back
 * to old_ctx and decrease bs->in_flight again.
 */
void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx);
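
/*
 * Example (a minimal, hypothetical sketch): run coroutine code in the
 * AioContext of @bs, always pairing enter and leave:
 *
 *     AioContext *old_ctx = bdrv_co_enter(bs);
 *     ret = bdrv_co_flush(bs);
 *     bdrv_co_leave(bs, old_ctx);
 */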
/*
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);

AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);
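
/*
 * Example (a minimal, hypothetical sketch): batch several submissions
 * between plug and unplug so the backend can coalesce them:
 *
 *     bdrv_io_plug(bs);
 *     ... submit multiple requests here ...
 *     bdrv_io_unplug(bs);
 */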
bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
                                     uint32_t granularity, Error **errp);
/**
 * bdrv_co_copy_range:
 *
 * Do offloaded copy between two children. If the operation is not implemented
 * by the driver, or if the backend storage doesn't support it, a negative
 * error code will be returned.
 *
 * Note: the block layer doesn't emulate or fall back to a bounce buffer
 * approach because usually the caller shouldn't attempt offloaded copy any
 * more (e.g. calling copy_file_range(2)) after the first error, thus it
 * should fall back to a read+write path at the caller level.
 *
 * @src: Source child to copy data from
 * @src_offset: offset in @src image to read data
 * @dst: Destination child to copy data to
 * @dst_offset: offset in @dst image to write data
 * @bytes: number of bytes to copy
 * @flags: request flags. Supported flags:
 *         BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
 *                               write on @dst as if bdrv_co_pwrite_zeroes is
 *                               called. Used to simplify caller code, or
 *                               during BlockDriver.bdrv_co_copy_range_from()
 *                               recursion.
 *         BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
 *                                   requests currently in flight.
 *
 * Returns: 0 if succeeded; negative error code if failed.
 */
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags);
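
/*
 * Example (hypothetical caller, a minimal sketch): attempt the offloaded
 * copy once and fall back to an explicit read+write path on any error, as
 * recommended above; copy_by_read_write() is a made-up helper name:
 *
 *     ret = bdrv_co_copy_range(src, src_off, dst, dst_off, bytes, 0, 0);
 *     if (ret < 0) {
 *         ret = copy_by_read_write(src, src_off, dst, dst_off, bytes);
 *     }
 */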
/**
 * bdrv_drained_end_no_poll:
 *
 * Same as bdrv_drained_end(), but do not poll for the subgraph to
 * actually become unquiesced. Therefore, no graph changes will occur
 * with this function.
 *
 * *drained_end_counter is incremented for every background operation
 * that is scheduled, and will be decremented for every operation once
 * it settles. The caller must poll until it reaches 0. The counter
 * should be accessed using atomic operations only.
 */
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);
246 * "I/O or GS" API functions. These functions can run without
247 * the BQL, but only in one specific iothread/main loop.
249 * More specifically, these functions use BDRV_POLL_WHILE(bs), which
250 * requires the caller to be either in the main thread and hold
251 * the BlockdriverState (bs) AioContext lock, or directly in the
252 * home thread that runs the bs AioContext. Calling them from
253 * another thread in another AioContext would cause deadlocks.
255 * Therefore, these functions are not proper I/O, because they
256 * can't run in *any* iothreads, but only in a specific one.
258 * These functions can call any function from I/O, Common and this
259 * categories, but must be invoked only by other "I/O or GS" and GS APIs.
261 * All functions in this category must use the macro
262 * IO_OR_GS_CODE();
263 * to catch when they are accidentally called by the wrong API.
#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    IO_OR_GS_CODE();                                       \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                   cond); })
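
/*
 * Example (a minimal, hypothetical sketch): kick a coroutine in the
 * AioContext of @bs and poll until it signals completion; @data is a
 * made-up caller-defined struct shared with the coroutine:
 *
 *     data.done = false;
 *     aio_co_enter(bdrv_get_aio_context(bs), co);
 *     BDRV_POLL_WHILE(bs, !data.done);
 */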
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);

int generated_co_wrapper
bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
              PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);

int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res,
                                    BdrvCheckMode fix);

/* Invalidate any cached metadata used by image formats */
int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs,
                                               Error **errp);
int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset,
                                       int64_t bytes);
int generated_co_wrapper
bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int generated_co_wrapper
bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end_single:
 *
 * End a quiesced section for the parent of @c.
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled, which may result in graph changes.
 */
void bdrv_parent_drained_end_single(BdrvChild *c);
/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and if @recursive is true its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);
/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including NBD server, block jobs, and device model.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);
/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled. On one hand, that may result in graph changes. On
 * the other, this requires that the caller either runs in the main
 * loop; or that all involved nodes (@bs and all of its parents) are
 * in the caller's AioContext.
 */
void bdrv_drained_end(BlockDriverState *bs);

/**
 * End a quiescent section started by bdrv_subtree_drained_begin().
 */
void bdrv_subtree_drained_end(BlockDriverState *bs);
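
/*
 * Example (a minimal, hypothetical sketch): quiesce a node so no new
 * requests arrive while it is being reconfigured, then resume:
 *
 *     bdrv_drained_begin(bs);
 *     ... reconfigure the node or detach its users here ...
 *     bdrv_drained_end(bs);
 */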

#endif /* BLOCK_IO_H */