/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef BLOCK_IO_H
#define BLOCK_IO_H

#include "block-common.h"

/*
 * I/O API functions. These functions are thread-safe, and therefore
 * can run in any thread as long as the thread has called
 * aio_context_acquire/release().
 *
 * These functions can only call functions from the I/O and Common categories,
 * but can be invoked by GS, "I/O or GS" and I/O APIs.
 *
 * All functions in this category must use the macro
 * IO_CODE();
 * to catch when they are accidentally called by the wrong API.
 */

int generated_co_wrapper bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                            int64_t bytes,
                                            BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int generated_co_wrapper bdrv_pread(BdrvChild *child, int64_t offset,
                                    int64_t bytes, void *buf,
                                    BdrvRequestFlags flags);
int generated_co_wrapper bdrv_pwrite(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags);
int generated_co_wrapper bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                                          int64_t bytes, const void *buf,
                                          BdrvRequestFlags flags);
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags);

/*
 * Efficiently zero a region of the disk image. Note that this is a regular
 * I/O request like read or write and should have a reasonable size. This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags);

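/*
 * Illustrative sketch (not part of the API): one way for a caller to honour
 * the note above is to zero a large range in bounded chunks instead of one
 * huge request.  The 1 MiB chunk size, the helper name and the use of the
 * usual QEMU MIN()/MiB helpers are assumptions made for the example only.
 *
 *     static int coroutine_fn example_zero_range(BdrvChild *child,
 *                                                int64_t offset, int64_t bytes)
 *     {
 *         while (bytes > 0) {
 *             int64_t chunk = MIN(bytes, 1 * MiB);
 *             int ret = bdrv_co_pwrite_zeroes(child, offset, chunk, 0);
 *             if (ret < 0) {
 *                 return ret;
 *             }
 *             offset += chunk;
 *             bytes -= chunk;
 *         }
 *         return 0;
 *     }
 */
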
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
void coroutine_fn bdrv_co_delete_file_noerr(BlockDriverState *bs);

void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Ensure contents are flushed to disk. */
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);

int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            bool include_base, int64_t offset, int64_t bytes,
                            int64_t *pnum);
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
                                      int64_t bytes);

int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
                              Error **errp);

bool bdrv_is_read_only(BlockDriverState *bs);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);

bool bdrv_supports_compressed_writes(BlockDriverState *bs);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
                                          Error **errp);
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

/*
 * Returns the alignment in bytes that is required so that no bounce buffer
 * is required throughout the stack
 */
size_t bdrv_min_mem_align(BlockDriverState *bs);

/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);

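/*
 * Illustrative sketch (not part of the API): a caller that cannot submit a
 * guest buffer directly can check bdrv_qiov_is_aligned() and fall back to a
 * bounce buffer allocated with qemu_blockalign(), which honours the
 * alignment reported above.  The surrounding logic and cleanup are
 * assumptions for the example only; qemu_vfree() releases such buffers.
 *
 *     void *bounce = NULL;
 *
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         bounce = qemu_blockalign(bs, qiov->size);
 *         qemu_iovec_to_buf(qiov, 0, bounce, qiov->size);
 *         // ... submit the I/O using @bounce instead of @qiov ...
 *         qemu_vfree(bounce);
 *     }
 */
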
void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)

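/*
 * Illustrative sketch: block drivers typically fire the matching blkdebug
 * event right before submitting a request, so that the blkdebug filter can
 * inject errors or suspend at that point.  The read call shown here is just
 * an example of such a request.
 *
 *     BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
 *     ret = bdrv_co_preadv(bs->file, offset, bytes, qiov, 0);
 */
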
/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);

/*
 * Move the current coroutine to the AioContext of @bs and return the old
 * AioContext of the coroutine. Increase bs->in_flight so that draining @bs
 * will wait for the operation to proceed until the corresponding
 * bdrv_co_leave().
 *
 * Consequently, you can't call drain inside a bdrv_co_enter/leave() section as
 * this will deadlock.
 */
AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs);

/*
 * Ends a section started by bdrv_co_enter(). Move the current coroutine back
 * to old_ctx and decrease bs->in_flight again.
 */
void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx);

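/*
 * Typical usage sketch (illustrative only): wrap an operation that must run
 * in @bs's AioContext and must be covered by drain.  Per the note above, the
 * code between enter and leave must not drain.
 *
 *     AioContext *old_ctx = bdrv_co_enter(bs);
 *     // ... perform the operation; do not call drain in here ...
 *     bdrv_co_leave(bs, old_ctx);
 */
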
/*
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);

AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
                                     uint32_t granularity, Error **errp);

/**
 * bdrv_co_copy_range:
 *
 * Do offloaded copy between two children. If the operation is not implemented
 * by the driver, or if the backend storage doesn't support it, a negative
 * error code will be returned.
 *
 * Note: the block layer doesn't emulate or fall back to a bounce buffer
 * approach because usually the caller shouldn't attempt offloaded copy any
 * more (e.g. calling copy_file_range(2)) after the first error, thus it
 * should fall back to a read+write path at the caller level.
 *
 * @src: Source child to copy data from
 * @src_offset: offset in @src image to read data
 * @dst: Destination child to copy data to
 * @dst_offset: offset in @dst image to write data
 * @bytes: number of bytes to copy
 * @flags: request flags. Supported flags:
 *         BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
 *                               write on @dst as if bdrv_co_pwrite_zeroes is
 *                               called. Used to simplify caller code, or
 *                               during BlockDriver.bdrv_co_copy_range_from()
 *                               recursion.
 *         BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
 *                                   requests currently in flight.
 *
 * Returns: 0 if succeeded; negative error code if failed.
 */
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags);

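/*
 * Caller-side sketch (illustrative only) of the fallback policy described in
 * the note above: try the offloaded copy once, and on the first error switch
 * to a read+write path instead of retrying the offload.  Allocation of the
 * bounce buffer @buf is elided and assumed for the example.
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset, bytes, 0, 0);
 *     if (ret < 0) {
 *         // Do not attempt offloading again; emulate with read+write.
 *         ret = bdrv_co_pread(src, src_offset, bytes, buf, 0);
 *         if (ret >= 0) {
 *             ret = bdrv_co_pwrite(dst, dst_offset, bytes, buf, 0);
 *         }
 *     }
 */
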
/**
 * bdrv_drained_end_no_poll:
 *
 * Same as bdrv_drained_end(), but do not poll for the subgraph to
 * actually become unquiesced. Therefore, no graph changes will occur
 * with this function.
 *
 * *drained_end_counter is incremented for every background operation
 * that is scheduled, and will be decremented for every operation once
 * it settles. The caller must poll until it reaches 0. The counter
 * should be accessed using atomic operations only.
 */
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);

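/*
 * Caller sketch (illustrative only): one way to satisfy the polling
 * requirement above is to poll the counter with BDRV_POLL_WHILE() and an
 * atomic read until it drops to zero.
 *
 *     int drained_end_counter = 0;
 *
 *     bdrv_drained_end_no_poll(bs, &drained_end_counter);
 *     BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
 */
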
254 * "I/O or GS" API functions. These functions can run without
255 * the BQL, but only in one specific iothread/main loop.
257 * More specifically, these functions use BDRV_POLL_WHILE(bs), which
258 * requires the caller to be either in the main thread and hold
259 * the BlockdriverState (bs) AioContext lock, or directly in the
260 * home thread that runs the bs AioContext. Calling them from
261 * another thread in another AioContext would cause deadlocks.
263 * Therefore, these functions are not proper I/O, because they
264 * can't run in *any* iothreads, but only in a specific one.
266 * These functions can call any function from I/O, Common and this
267 * categories, but must be invoked only by other "I/O or GS" and GS APIs.
269 * All functions in this category must use the macro
271 * to catch when they are accidentally called by the wrong API.
#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    IO_OR_GS_CODE();                                       \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                   cond); })

void bdrv_drain(BlockDriverState *bs);

int generated_co_wrapper
bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
              PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);

int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res,
                                    BdrvCheckMode fix);

/* Invalidate any cached metadata used by image formats */
int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs,
                                               Error **errp);
int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset,
                                       int64_t bytes);
int generated_co_wrapper
bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int generated_co_wrapper
bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);

/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end_single:
 *
 * End a quiesced section for the parent of @c.
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled, which may result in graph changes.
 */
void bdrv_parent_drained_end_single(BdrvChild *c);

/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and if @recursive is true its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including NBD server, block jobs, and device model.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);

/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled. On one hand, that may result in graph changes. On
 * the other, this requires that the caller either runs in the main
 * loop; or that all involved nodes (@bs and all of its parents) are
 * in the caller's AioContext.
 */
void bdrv_drained_end(BlockDriverState *bs);

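/*
 * Typical usage sketch (illustrative only): quiesce a node before changing
 * something that in-flight requests must not observe, then resume.  The
 * reconfiguration step in the middle is a placeholder for the example.
 *
 *     bdrv_drained_begin(bs);
 *     // ... reconfigure the node or the graph around it ...
 *     bdrv_drained_end(bs);
 */
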
/**
 * End a quiescent section started by bdrv_subtree_drained_begin().
 */
void bdrv_subtree_drained_end(BlockDriverState *bs);

#endif /* BLOCK_IO_H */