#include "qemu/option.h"
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
#include "block/blockjob.h"
#include "qapi/qmp/qobject.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"

typedef struct BlockDriver BlockDriver;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildRole BdrvChildRole;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    /*
     * True if unallocated blocks read back as zeroes. This is equivalent
     * to the LBPRZ flag in the SCSI logical block provisioning page.
     */
    bool unallocated_blocks_are_zero;
    /*
     * True if the driver can optimize writing zeroes by unmapping
     * sectors. This is equivalent to the BLKDISCARDZEROES ioctl in Linux
     * with the difference that in qemu a discard is allowed to silently
     * fail. Therefore we have to use bdrv_pwrite_zeroes with the
     * BDRV_REQ_MAY_UNMAP flag for an optimized zero write with unmapping.
     * After this call the driver has to guarantee that the contents read
     * back as zero. It is additionally required that the block device is
     * opened with the BDRV_O_UNMAP flag for this to work.
     */
    bool can_write_zeroes_with_unmap;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

typedef enum {
    BDRV_REQ_COPY_ON_READ       = 0x1,
    BDRV_REQ_ZERO_WRITE         = 0x2,
    /* The BDRV_REQ_MAY_UNMAP flag is used to indicate that the block driver
     * is allowed to optimize a write zeroes request by unmapping (discarding)
     * blocks if it is guaranteed that the result will read back as
     * zeroes. The flag is only passed to the driver if the block device is
     * opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP          = 0x4,
    BDRV_REQ_NO_SERIALISING     = 0x8,
    BDRV_REQ_WRITE_COMPRESSED   = 0x20,

    /* Mask of valid flags */
    BDRV_REQ_MASK               = 0x3f,
} BdrvRequestFlags;

typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;

#define BDRV_O_RDWR        0x0002
#define BDRV_O_SNAPSHOT    0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY   0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE     0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO  0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING  0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH    0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE    0x0800  /* consistency hint for migration handoff */
#define BDRV_O_CHECK       0x1000  /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR  0x2000  /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP       0x4000  /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL    0x8000  /* if no block driver is explicitly given:
                                      select an appropriate protocol driver,
                                      ignoring the format layer */
#define BDRV_O_NO_IO       0x10000 /* don't initialize for I/O */

#define BDRV_O_CACHE_MASK  (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)

/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY      "read-only"
#define BDRV_OPT_DISCARD        "discard"

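/*
 * Illustrative sketch (not part of the original header): opening an image
 * with an options QDict and flags built from the macros above.  The file
 * name and driver are hypothetical and error handling is abbreviated;
 * option values are passed as strings, as the block layer parses them.
 *
 *     QDict *options = qdict_new();
 *     qdict_put(options, "driver", qstring_from_str("qcow2"));
 *     qdict_put(options, BDRV_OPT_CACHE_DIRECT, qstring_from_str("on"));
 *
 *     Error *local_err = NULL;
 *     BlockDriverState *bs = bdrv_open("/tmp/disk.qcow2", NULL, options,
 *                                      BDRV_O_RDWR | BDRV_O_UNMAP,
 *                                      &local_err);
 *     if (!bs) {
 *         error_report_err(local_err);
 *     }
 */
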
#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)

#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                     INT_MAX >> BDRV_SECTOR_BITS)

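/*
 * Illustrative sketch (not part of the original header): converting a byte
 * range to sector units with the macros above.  'offset' and 'bytes' are
 * hypothetical, already 512-byte aligned values.
 *
 *     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
 *     int nb_sectors = bytes >> BDRV_SECTOR_BITS;
 *     assert((offset & ~BDRV_SECTOR_MASK) == 0);
 *     assert(nb_sectors <= BDRV_REQUEST_MAX_SECTORS);
 */
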
/*
 * Allocation status flags
 * BDRV_BLOCK_DATA: data is read from a file returned by bdrv_get_block_status.
 * BDRV_BLOCK_ZERO: sectors read as zero
 * BDRV_BLOCK_OFFSET_VALID: sector stored as raw data in a file returned by
 *                          bdrv_get_block_status.
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer (as opposed to the backing file)
 * BDRV_BLOCK_RAW: used internally to indicate that the request
 *                 was answered by the raw driver and that one
 *                 should look in bs->file directly.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, bits 9-62 represent the offset in
 * bs->file where sector data can be read from as raw data.
 *
 * DATA == 0 && ZERO == 0 means that data is read from backing_hd if present.
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, bs->file is zero at offset
 *  t    f        t       sectors read as valid from bs->file at offset
 *  f    t        t       sectors preallocated, read as zero, bs->file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        bs->file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK

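/*
 * Illustrative sketch (not part of the original header): interpreting the
 * value returned by bdrv_get_block_status() according to the table above.
 * 'bs', 'sector_num' and 'nb_sectors' are hypothetical.
 *
 *     int pnum;
 *     BlockDriverState *file;
 *     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors,
 *                                         &pnum, &file);
 *     if (ret < 0) {
 *         (error)
 *     } else if (ret & BDRV_BLOCK_ZERO) {
 *         (the first 'pnum' sectors read back as zero)
 *     } else if ((ret & BDRV_BLOCK_DATA) && (ret & BDRV_BLOCK_OFFSET_VALID)) {
 *         (raw data lives in 'file' at offset (ret & BDRV_BLOCK_OFFSET_MASK))
 *     }
 */
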
typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    QDict *explicit_options;
} BDRVReopenState;

/**
 * Block operation types
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
} BlockOpType;

/* disk I/O throttling */
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
bool bdrv_uses_whitelist(void);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp);
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
BlockDriverState *bdrv_new(void);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
void bdrv_replace_in_backing_chain(BlockDriverState *old,
                                   BlockDriverState *new);

int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
int bdrv_parse_discard_flags(const char *mode, int *flags);
BdrvChild *bdrv_open_child(const char *filename,
                           QDict *options, const char *bdref_key,
                           BlockDriverState *parent,
                           const BdrvChildRole *child_role,
                           bool allow_none, Error **errp);
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
                           const char *bdref_key, Error **errp);
BlockDriverState *bdrv_open(const char *filename, const char *reference,
                            QDict *options, int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs,
                                    QDict *options, int flags);
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp);
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
                        BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
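/*
 * Illustrative sketch (not part of the original header): switching a node
 * to read-write via the reopen machinery.  'bs' is hypothetical; a simple
 * flag change can go through bdrv_reopen(), which queues, prepares and
 * commits (or aborts) on the caller's behalf.
 *
 *     Error *local_err = NULL;
 *     int ret = bdrv_reopen(bs, bdrv_get_flags(bs) | BDRV_O_RDWR, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */
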
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors);
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors);
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int count, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count);
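/*
 * Illustrative sketch (not part of the original header): byte-granularity
 * I/O through a BdrvChild.  'child' and the offsets are hypothetical; each
 * call returns a negative errno on failure.
 *
 *     uint8_t buf[512];
 *     int ret = bdrv_pread(child, 0, buf, sizeof(buf));
 *     if (ret >= 0) {
 *         ret = bdrv_pwrite_sync(child, 4096, buf, sizeof(buf));
 *     }
 */
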
int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
                                int nb_sectors, QEMUIOVector *qiov);
/*
 * Efficiently zero a region of the disk image. Note that this is a regular
 * I/O request like read or write and should have a reasonable size. This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int count, BdrvRequestFlags flags);
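/*
 * Illustrative sketch (not part of the original header): zeroing a large
 * range in bounded chunks from coroutine context, as recommended by the
 * comment above.  'child', 'end' and the 1 MiB chunk size are hypothetical.
 *
 *     int64_t offset = 0;
 *     while (offset < end) {
 *         int count = MIN(end - offset, 1 * 1024 * 1024);
 *         int ret = bdrv_co_pwrite_zeroes(child, offset, count, 0);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         offset += count;
 *     }
 */
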
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file);
int bdrv_get_backing_file_depth(BlockDriverState *bs);
void bdrv_refresh_filename(BlockDriverState *bs);
int bdrv_truncate(BlockDriverState *bs, int64_t offset);
int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_change_backing_file(BlockDriverState *bs,
                             const char *backing_file, const char *backing_fmt);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base,
                           const char *backing_file_str);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);

typedef struct BdrvCheckResult {
    int corruptions_fixed;
    int64_t image_end_offset;
} BdrvCheckResult;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);

/* The units of offset and total_work_size may be chosen arbitrarily by the
 * block driver; total_work_size may change during the course of the amendment
 * operation */
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
                                      int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb, void *cb_opaque);

/* external snapshots */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate);
bool bdrv_is_first_non_filter(BlockDriverState *candidate);

/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
                                        const char *node_name, Error **errp);

/* async block I/O */
BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                            QEMUIOVector *iov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                           BlockCompletionFunc *cb, void *opaque);
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Invalidate any cached metadata used by image formats */
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
void bdrv_invalidate_cache_all(Error **errp);
int bdrv_inactivate_all(void);

/* Ensure contents are flushed to disk. */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);

#define BDRV_POLL_WHILE(bs, cond) ({                       \
    bool waited_ = false;                                  \
    BlockDriverState *bs_ = (bs);                          \
    AioContext *ctx_ = bdrv_get_aio_context(bs_);          \
    if (aio_context_in_iothread(ctx_)) {                   \
        while ((cond)) {                                   \
            aio_poll(ctx_, true);                          \
            waited_ = true;                                \
        }                                                  \
    } else {                                               \
        assert(qemu_get_current_aio_context() ==           \
               qemu_get_aio_context());                    \
        /* Ask bdrv_dec_in_flight to wake up the main      \
         * QEMU AioContext.  Extra I/O threads never take  \
         * other I/O threads' AioContexts (see for example \
         * block_job_defer_to_main_loop for how to do it). \
         */                                                \
        assert(!bs_->wakeup);                              \
        bs_->wakeup = true;                                \
        while ((cond)) {                                   \
            aio_context_release(ctx_);                     \
            aio_poll(qemu_get_aio_context(), true);        \
            aio_context_acquire(ctx_);                     \
            waited_ = true;                                \
        }                                                  \
        bs_->wakeup = false;                               \
    }                                                      \
    waited_; })

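/*
 * Illustrative sketch (not part of the original header): a typical caller
 * spawns a coroutine that sets a completion flag and then uses
 * BDRV_POLL_WHILE to poll the node's AioContext (or the main context)
 * until the condition becomes false.  'FlushData', 'data' and
 * 'flush_co_entry' are hypothetical names.
 *
 *     FlushData data = { .bs = bs, .done = false };
 *     Coroutine *co = qemu_coroutine_create(flush_co_entry, &data);
 *     qemu_coroutine_enter(co);
 *     BDRV_POLL_WHILE(bs, !data.done);
 */
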
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count);
int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int count);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum,
                              BlockDriverState **file);
int64_t bdrv_get_block_status_above(BlockDriverState *bs,
                                    BlockDriverState *base,
                                    int64_t sector_num,
                                    int nb_sectors, int *pnum,
                                    BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                      int *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            int64_t sector_num, int nb_sectors, int *pnum);

bool bdrv_is_read_only(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
int bdrv_media_changed(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp);
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
BlockDriverState *bdrv_next_node(BlockDriverState *bs);

typedef struct BdrvNextIterator {
    enum {
        BDRV_NEXT_BACKEND_ROOTS,
        BDRV_NEXT_MONITOR_OWNED,
    } phase;
    BlockDriverState *bs;
} BdrvNextIterator;

BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);

BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
bool bdrv_is_encrypted(BlockDriverState *bs);
bool bdrv_key_required(BlockDriverState *bs);
int bdrv_set_key(BlockDriverState *bs, const char *key);
void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes);

const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
void bdrv_get_full_backing_filename(BlockDriverState *bs,
                                    char *dest, size_t sz, Error **errp);
void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp);

int path_has_protocol(const char *path);
int path_is_absolute(const char *path);
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename);

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet);
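/*
 * Illustrative sketch (not part of the original header): creating a 10 GiB
 * qcow2 image, roughly as qemu-img create does.  The file name, size and
 * flags are hypothetical.
 *
 *     Error *local_err = NULL;
 *     bdrv_img_create("/tmp/new.qcow2", "qcow2", NULL, NULL,
 *                     NULL, 10 * 1024 * 1024 * 1024ULL, BDRV_O_RDWR,
 *                     &local_err, false);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */
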
/* Returns the alignment in bytes that is required so that no bounce buffer
 * is required throughout the stack */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
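/*
 * Illustrative sketch (not part of the original header): allocating a
 * bounce buffer when a caller-provided vector is not sufficiently aligned
 * for the underlying file.  'bs', 'qiov' and 'len' are hypothetical.
 *
 *     void *bounce = NULL;
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         bounce = qemu_try_blockalign(bs, len);
 *         if (!bounce) {
 *             return -ENOMEM;
 *         }
 *     }
 *     (... do the I/O, then qemu_vfree(bounce) ...)
 */
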
void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
                             BlockDriverState *child_bs,
                             const char *child_name,
                             const BdrvChildRole *child_role);

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
bool bdrv_op_blocker_is_empty(BlockDriverState *bs);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag);
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);

/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);

/**
 * bdrv_set_aio_context:
 *
 * Changes the #AioContext used for fd handlers, timers, and BHs by this
 * BlockDriverState and all its children.
 *
 * This function must be called with iothread lock held.
 */
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context);
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including NBD server and device model. Note that
 * this doesn't block timers or coroutines from submitting more requests, which
 * means block_job_pause is still necessary.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 */
void bdrv_drained_end(BlockDriverState *bs);
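/*
 * Illustrative sketch (not part of the original header): the usual pairing
 * of the two calls above around an operation that needs exclusive access
 * to a node.  'bs' is hypothetical; the section may be entered recursively.
 *
 *     bdrv_drained_begin(bs);
 *     (... modify the graph or the node's state here ...)
 *     bdrv_drained_end(bs);
 */
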
void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
                    Error **errp);
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);