2 * QEMU System Emulator block driver
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 #include "block/accounting.h"
28 #include "block/block.h"
29 #include "qemu/option.h"
30 #include "qemu/queue.h"
31 #include "qemu/coroutine.h"
32 #include "qemu/timer.h"
33 #include "qapi-types.h"
34 #include "qemu/hbitmap.h"
35 #include "block/snapshot.h"
36 #include "qemu/main-loop.h"
37 #include "qemu/throttle.h"
/* Image flags, used as a bitmask (values must stay in sync with on-disk
 * format drivers that consume them) */
#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

/* Well-known option names accepted by image creation (bdrv_create) and
 * format drivers; each maps to a QemuOpts key */
#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_HWVERSION         "hwversion"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

/* Number of leading bytes read from an image when probing its format */
#define BLOCK_PROBE_BUF_SIZE        512
62 enum BdrvTrackedRequestType
{
70 typedef struct BdrvTrackedRequest
{
74 enum BdrvTrackedRequestType type
;
77 int64_t overlap_offset
;
78 unsigned int overlap_bytes
;
80 QLIST_ENTRY(BdrvTrackedRequest
) list
;
81 Coroutine
*co
; /* owner, used for deadlock detection */
82 CoQueue wait_queue
; /* coroutines blocked on this request */
84 struct BdrvTrackedRequest
*waiting_for
;
88 const char *format_name
;
91 /* set to true if the BlockDriver is a block filter */
93 /* for snapshots block filter like Quorum can implement the
94 * following recursive callback.
95 * It's purpose is to recurse on the filter children while calling
96 * bdrv_recurse_is_first_non_filter on them.
97 * For a sample implementation look in the future Quorum block filter.
99 bool (*bdrv_recurse_is_first_non_filter
)(BlockDriverState
*bs
,
100 BlockDriverState
*candidate
);
102 int (*bdrv_probe
)(const uint8_t *buf
, int buf_size
, const char *filename
);
103 int (*bdrv_probe_device
)(const char *filename
);
105 /* Any driver implementing this callback is expected to be able to handle
106 * NULL file names in its .bdrv_open() implementation */
107 void (*bdrv_parse_filename
)(const char *filename
, QDict
*options
, Error
**errp
);
108 /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
109 * this field set to true, except ones that are defined only by their
111 * An example of the last type will be the quorum block driver.
113 bool bdrv_needs_filename
;
115 /* Set if a driver can support backing files */
116 bool supports_backing
;
118 /* For handling image reopen for split or non-split files */
119 int (*bdrv_reopen_prepare
)(BDRVReopenState
*reopen_state
,
120 BlockReopenQueue
*queue
, Error
**errp
);
121 void (*bdrv_reopen_commit
)(BDRVReopenState
*reopen_state
);
122 void (*bdrv_reopen_abort
)(BDRVReopenState
*reopen_state
);
123 void (*bdrv_join_options
)(QDict
*options
, QDict
*old_options
);
125 int (*bdrv_open
)(BlockDriverState
*bs
, QDict
*options
, int flags
,
127 int (*bdrv_file_open
)(BlockDriverState
*bs
, QDict
*options
, int flags
,
129 void (*bdrv_close
)(BlockDriverState
*bs
);
130 int (*bdrv_create
)(const char *filename
, QemuOpts
*opts
, Error
**errp
);
131 int (*bdrv_set_key
)(BlockDriverState
*bs
, const char *key
);
132 int (*bdrv_make_empty
)(BlockDriverState
*bs
);
134 void (*bdrv_refresh_filename
)(BlockDriverState
*bs
, QDict
*options
);
137 BlockAIOCB
*(*bdrv_aio_readv
)(BlockDriverState
*bs
,
138 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
139 BlockCompletionFunc
*cb
, void *opaque
);
140 BlockAIOCB
*(*bdrv_aio_writev
)(BlockDriverState
*bs
,
141 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
142 BlockCompletionFunc
*cb
, void *opaque
);
143 BlockAIOCB
*(*bdrv_aio_flush
)(BlockDriverState
*bs
,
144 BlockCompletionFunc
*cb
, void *opaque
);
145 BlockAIOCB
*(*bdrv_aio_discard
)(BlockDriverState
*bs
,
146 int64_t sector_num
, int nb_sectors
,
147 BlockCompletionFunc
*cb
, void *opaque
);
149 int coroutine_fn (*bdrv_co_readv
)(BlockDriverState
*bs
,
150 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
);
151 int coroutine_fn (*bdrv_co_preadv
)(BlockDriverState
*bs
,
152 uint64_t offset
, uint64_t bytes
, QEMUIOVector
*qiov
, int flags
);
153 int coroutine_fn (*bdrv_co_writev
)(BlockDriverState
*bs
,
154 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
);
155 int coroutine_fn (*bdrv_co_writev_flags
)(BlockDriverState
*bs
,
156 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
, int flags
);
157 int coroutine_fn (*bdrv_co_pwritev
)(BlockDriverState
*bs
,
158 uint64_t offset
, uint64_t bytes
, QEMUIOVector
*qiov
, int flags
);
161 * Efficiently zero a region of the disk image. Typically an image format
162 * would use a compact metadata representation to implement this. This
163 * function pointer may be NULL or return -ENOSUP and .bdrv_co_writev()
164 * will be called instead.
166 int coroutine_fn (*bdrv_co_pwrite_zeroes
)(BlockDriverState
*bs
,
167 int64_t offset
, int count
, BdrvRequestFlags flags
);
168 int coroutine_fn (*bdrv_co_discard
)(BlockDriverState
*bs
,
169 int64_t sector_num
, int nb_sectors
);
170 int64_t coroutine_fn (*bdrv_co_get_block_status
)(BlockDriverState
*bs
,
171 int64_t sector_num
, int nb_sectors
, int *pnum
,
172 BlockDriverState
**file
);
175 * Invalidate any cached meta-data.
177 void (*bdrv_invalidate_cache
)(BlockDriverState
*bs
, Error
**errp
);
178 int (*bdrv_inactivate
)(BlockDriverState
*bs
);
181 * Flushes all data for all layers by calling bdrv_co_flush for underlying
182 * layers, if needed. This function is needed for deterministic
183 * synchronization of the flush finishing callback.
185 int coroutine_fn (*bdrv_co_flush
)(BlockDriverState
*bs
);
188 * Flushes all data that was already written to the OS all the way down to
189 * the disk (for example raw-posix calls fsync()).
191 int coroutine_fn (*bdrv_co_flush_to_disk
)(BlockDriverState
*bs
);
194 * Flushes all internal caches to the OS. The data may still sit in a
195 * writeback cache of the host OS, but it will survive a crash of the qemu
198 int coroutine_fn (*bdrv_co_flush_to_os
)(BlockDriverState
*bs
);
200 const char *protocol_name
;
201 int (*bdrv_truncate
)(BlockDriverState
*bs
, int64_t offset
);
203 int64_t (*bdrv_getlength
)(BlockDriverState
*bs
);
204 bool has_variable_length
;
205 int64_t (*bdrv_get_allocated_file_size
)(BlockDriverState
*bs
);
207 int (*bdrv_write_compressed
)(BlockDriverState
*bs
, int64_t sector_num
,
208 const uint8_t *buf
, int nb_sectors
);
210 int (*bdrv_snapshot_create
)(BlockDriverState
*bs
,
211 QEMUSnapshotInfo
*sn_info
);
212 int (*bdrv_snapshot_goto
)(BlockDriverState
*bs
,
213 const char *snapshot_id
);
214 int (*bdrv_snapshot_delete
)(BlockDriverState
*bs
,
215 const char *snapshot_id
,
218 int (*bdrv_snapshot_list
)(BlockDriverState
*bs
,
219 QEMUSnapshotInfo
**psn_info
);
220 int (*bdrv_snapshot_load_tmp
)(BlockDriverState
*bs
,
221 const char *snapshot_id
,
224 int (*bdrv_get_info
)(BlockDriverState
*bs
, BlockDriverInfo
*bdi
);
225 ImageInfoSpecific
*(*bdrv_get_specific_info
)(BlockDriverState
*bs
);
227 int coroutine_fn (*bdrv_save_vmstate
)(BlockDriverState
*bs
,
230 int coroutine_fn (*bdrv_load_vmstate
)(BlockDriverState
*bs
,
234 int (*bdrv_change_backing_file
)(BlockDriverState
*bs
,
235 const char *backing_file
, const char *backing_fmt
);
237 /* removable device specific */
238 bool (*bdrv_is_inserted
)(BlockDriverState
*bs
);
239 int (*bdrv_media_changed
)(BlockDriverState
*bs
);
240 void (*bdrv_eject
)(BlockDriverState
*bs
, bool eject_flag
);
241 void (*bdrv_lock_medium
)(BlockDriverState
*bs
, bool locked
);
243 /* to control generic scsi devices */
244 BlockAIOCB
*(*bdrv_aio_ioctl
)(BlockDriverState
*bs
,
245 unsigned long int req
, void *buf
,
246 BlockCompletionFunc
*cb
, void *opaque
);
248 /* List of options for creating images, terminated by name == NULL */
249 QemuOptsList
*create_opts
;
252 * Returns 0 for completed check, -errno for internal errors.
253 * The check results are stored in result.
255 int (*bdrv_check
)(BlockDriverState
* bs
, BdrvCheckResult
*result
,
258 int (*bdrv_amend_options
)(BlockDriverState
*bs
, QemuOpts
*opts
,
259 BlockDriverAmendStatusCB
*status_cb
,
262 void (*bdrv_debug_event
)(BlockDriverState
*bs
, BlkdebugEvent event
);
264 /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */
265 int (*bdrv_debug_breakpoint
)(BlockDriverState
*bs
, const char *event
,
267 int (*bdrv_debug_remove_breakpoint
)(BlockDriverState
*bs
,
269 int (*bdrv_debug_resume
)(BlockDriverState
*bs
, const char *tag
);
270 bool (*bdrv_debug_is_suspended
)(BlockDriverState
*bs
, const char *tag
);
272 void (*bdrv_refresh_limits
)(BlockDriverState
*bs
, Error
**errp
);
275 * Returns 1 if newly created images are guaranteed to contain only
276 * zeros, 0 otherwise.
278 int (*bdrv_has_zero_init
)(BlockDriverState
*bs
);
280 /* Remove fd handlers, timers, and other event loop callbacks so the event
281 * loop is no longer in use. Called with no in-flight requests and in
282 * depth-first traversal order with parents before child nodes.
284 void (*bdrv_detach_aio_context
)(BlockDriverState
*bs
);
286 /* Add fd handlers, timers, and other event loop callbacks so I/O requests
287 * can be processed again. Called with no in-flight requests and in
288 * depth-first traversal order with child nodes before parent nodes.
290 void (*bdrv_attach_aio_context
)(BlockDriverState
*bs
,
291 AioContext
*new_context
);
293 /* io queue for linux-aio */
294 void (*bdrv_io_plug
)(BlockDriverState
*bs
);
295 void (*bdrv_io_unplug
)(BlockDriverState
*bs
);
298 * Try to get @bs's logical and physical block size.
299 * On success, store them in @bsz and return zero.
300 * On failure, return negative errno.
302 int (*bdrv_probe_blocksizes
)(BlockDriverState
*bs
, BlockSizes
*bsz
);
304 * Try to get @bs's geometry (cyls, heads, sectors)
305 * On success, store them in @geo and return 0.
306 * On failure return -errno.
307 * Only drivers that want to override guest geometry implement this
308 * callback; see hd_geometry_guess().
310 int (*bdrv_probe_geometry
)(BlockDriverState
*bs
, HDGeometry
*geo
);
313 * Drain and stop any internal sources of requests in the driver, and
314 * remain so until next I/O callback (e.g. bdrv_co_writev) is called.
316 void (*bdrv_drain
)(BlockDriverState
*bs
);
318 void (*bdrv_add_child
)(BlockDriverState
*parent
, BlockDriverState
*child
,
320 void (*bdrv_del_child
)(BlockDriverState
*parent
, BdrvChild
*child
,
323 QLIST_ENTRY(BlockDriver
) list
;
326 typedef struct BlockLimits
{
327 /* Alignment requirement, in bytes, for offset/length of I/O
328 * requests. Must be a power of 2 less than INT_MAX; defaults to
329 * 1 for drivers with modern byte interfaces, and to 512
331 uint32_t request_alignment
;
333 /* maximum number of bytes that can be discarded at once (since it
334 * is signed, it must be < 2G, if set), should be multiple of
335 * pdiscard_alignment, but need not be power of 2. May be 0 if no
336 * inherent 32-bit limit */
337 int32_t max_pdiscard
;
339 /* optimal alignment for discard requests in bytes, must be power
340 * of 2, less than max_pdiscard if that is set, and multiple of
341 * bl.request_alignment. May be 0 if bl.request_alignment is good
343 uint32_t pdiscard_alignment
;
345 /* maximum number of bytes that can zeroized at once (since it is
346 * signed, it must be < 2G, if set), should be multiple of
347 * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
348 int32_t max_pwrite_zeroes
;
350 /* optimal alignment for write zeroes requests in bytes, must be
351 * power of 2, less than max_pwrite_zeroes if that is set, and
352 * multiple of bl.request_alignment. May be 0 if
353 * bl.request_alignment is good enough */
354 uint32_t pwrite_zeroes_alignment
;
356 /* optimal transfer length in bytes (must be power of 2, and
357 * multiple of bl.request_alignment), or 0 if no preferred size */
358 uint32_t opt_transfer
;
360 /* maximal transfer length in bytes (need not be power of 2, but
361 * should be multiple of opt_transfer), or 0 for no 32-bit limit.
362 * For now, anything larger than INT_MAX is clamped down. */
363 uint32_t max_transfer
;
365 /* memory alignment, in bytes so that no bounce buffer is needed */
366 size_t min_mem_alignment
;
368 /* memory alignment, in bytes, for bounce buffer */
369 size_t opt_mem_alignment
;
371 /* maximum number of iovec elements */
375 typedef struct BdrvOpBlocker BdrvOpBlocker
;
377 typedef struct BdrvAioNotifier
{
378 void (*attached_aio_context
)(AioContext
*new_context
, void *opaque
);
379 void (*detach_aio_context
)(void *opaque
);
384 QLIST_ENTRY(BdrvAioNotifier
) list
;
387 struct BdrvChildRole
{
388 void (*inherit_options
)(int *child_flags
, QDict
*child_options
,
389 int parent_flags
, QDict
*parent_options
);
391 void (*change_media
)(BdrvChild
*child
, bool load
);
392 void (*resize
)(BdrvChild
*child
);
394 /* Returns a name that is supposedly more useful for human users than the
395 * node name for identifying the node in question (in particular, a BB
396 * name), or NULL if the parent can't provide a better name. */
397 const char* (*get_name
)(BdrvChild
*child
);
400 * If this pair of functions is implemented, the parent doesn't issue new
401 * requests after returning from .drained_begin() until .drained_end() is
404 * Note that this can be nested. If drained_begin() was called twice, new
405 * I/O is allowed only after drained_end() was called twice, too.
407 void (*drained_begin
)(BdrvChild
*child
);
408 void (*drained_end
)(BdrvChild
*child
);
411 extern const BdrvChildRole child_file
;
412 extern const BdrvChildRole child_format
;
415 BlockDriverState
*bs
;
417 const BdrvChildRole
*role
;
419 QLIST_ENTRY(BdrvChild
) next
;
420 QLIST_ENTRY(BdrvChild
) next_parent
;
424 * Note: the function bdrv_append() copies and swaps contents of
425 * BlockDriverStates, so if you add new fields to this struct, please
426 * inspect bdrv_append() to determine if the new fields need to be
429 struct BlockDriverState
{
430 int64_t total_sectors
; /* if we are reading a disk image, give its
432 int open_flags
; /* flags used to open the file, re-used for re-open */
433 bool read_only
; /* if true, the media is read only */
434 bool encrypted
; /* if true, the media is encrypted */
435 bool valid_key
; /* if true, a valid encryption key has been set */
436 bool sg
; /* if true, the device is a /dev/sg* */
437 bool probed
; /* if true, format was probed rather than specified */
439 int copy_on_read
; /* if nonzero, copy read backing sectors into image.
440 note this is a reference count */
442 CoQueue flush_queue
; /* Serializing flush queue */
443 unsigned int write_gen
; /* Current data generation */
444 unsigned int flush_started_gen
; /* Generation for which flush has started */
445 unsigned int flushed_gen
; /* Flushed write generation */
447 BlockDriver
*drv
; /* NULL means no media */
450 AioContext
*aio_context
; /* event loop used for fd handlers, timers, etc */
451 /* long-running tasks intended to always use the same AioContext as this
452 * BDS may register themselves in this list to be notified of changes
453 * regarding this BDS's context */
454 QLIST_HEAD(, BdrvAioNotifier
) aio_notifiers
;
455 bool walking_aio_notifiers
; /* to make removal during iteration safe */
457 char filename
[PATH_MAX
];
458 char backing_file
[PATH_MAX
]; /* if non zero, the image is a diff of
460 char backing_format
[16]; /* if non-zero and backing_file exists */
462 QDict
*full_open_options
;
463 char exact_filename
[PATH_MAX
];
468 /* Callback before write request is processed */
469 NotifierWithReturnList before_write_notifiers
;
471 /* number of in-flight serialising requests */
472 unsigned int serialising_in_flight
;
474 /* Offset after the highest byte written to */
475 uint64_t wr_highest_offset
;
480 /* Flags honored during pwrite (so far: BDRV_REQ_FUA) */
481 unsigned int supported_write_flags
;
482 /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
483 * BDRV_REQ_MAY_UNMAP) */
484 unsigned int supported_zero_flags
;
486 /* the following member gives a name to every node on the bs graph. */
488 /* element of the list of named nodes building the graph */
489 QTAILQ_ENTRY(BlockDriverState
) node_list
;
490 /* element of the list of all BlockDriverStates (all_bdrv_states) */
491 QTAILQ_ENTRY(BlockDriverState
) bs_list
;
492 /* element of the list of monitor-owned BDS */
493 QTAILQ_ENTRY(BlockDriverState
) monitor_list
;
494 QLIST_HEAD(, BdrvDirtyBitmap
) dirty_bitmaps
;
497 QLIST_HEAD(, BdrvTrackedRequest
) tracked_requests
;
499 /* operation blockers */
500 QLIST_HEAD(, BdrvOpBlocker
) op_blockers
[BLOCK_OP_TYPE_MAX
];
502 /* long-running background operation */
505 /* The node that this node inherited default options from (and a reopen on
506 * which can affect this node by changing these defaults). This is always a
507 * parent node of this node. */
508 BlockDriverState
*inherits_from
;
509 QLIST_HEAD(, BdrvChild
) children
;
510 QLIST_HEAD(, BdrvChild
) parents
;
513 QDict
*explicit_options
;
514 BlockdevDetectZeroesOptions detect_zeroes
;
516 /* The error object in use for blocking operations on backing_hd */
517 Error
*backing_blocker
;
519 /* threshold limit for writes, in bytes. "High water mark". */
520 uint64_t write_threshold_offset
;
521 NotifierWithReturn write_threshold_notifier
;
523 /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */
525 unsigned io_plug_disabled
;
530 struct BlockBackendRootState
{
533 BlockdevDetectZeroesOptions detect_zeroes
;
/* How a mirror job configures the backing chain of its target image once
 * the job completes. */
typedef enum BlockMirrorBackingMode {
    /* Reuse the existing backing chain from the source for the target.
     * - sync=full: Set backing BDS to NULL.
     * - sync=top: Use source's backing BDS.
     * - sync=none: Use source as the backing BDS. */
    MIRROR_SOURCE_BACKING_CHAIN,

    /* Open the target's backing chain completely anew */
    MIRROR_OPEN_BACKING_CHAIN,

    /* Do not change the target's backing BDS after job completion */
    MIRROR_LEAVE_BACKING_CHAIN,
} BlockMirrorBackingMode;
550 static inline BlockDriverState
*backing_bs(BlockDriverState
*bs
)
552 return bs
->backing
? bs
->backing
->bs
: NULL
;
556 /* Essential block drivers which must always be statically linked into qemu, and
557 * which therefore can be accessed without using bdrv_find_format() */
558 extern BlockDriver bdrv_file
;
559 extern BlockDriver bdrv_raw
;
560 extern BlockDriver bdrv_qcow2
;
563 * bdrv_setup_io_funcs:
565 * Prepare a #BlockDriver for I/O request processing by populating
566 * unimplemented coroutine and AIO interfaces with generic wrapper functions
567 * that fall back to implemented interfaces.
569 void bdrv_setup_io_funcs(BlockDriver
*bdrv
);
571 int coroutine_fn
bdrv_co_preadv(BdrvChild
*child
,
572 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
573 BdrvRequestFlags flags
);
574 int coroutine_fn
bdrv_co_pwritev(BdrvChild
*child
,
575 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
576 BdrvRequestFlags flags
);
578 int get_tmp_filename(char *filename
, int size
);
579 BlockDriver
*bdrv_probe_all(const uint8_t *buf
, int buf_size
,
580 const char *filename
);
584 * bdrv_add_before_write_notifier:
586 * Register a callback that is invoked before write requests are processed but
587 * after any throttling or waiting for overlapping requests.
589 void bdrv_add_before_write_notifier(BlockDriverState
*bs
,
590 NotifierWithReturn
*notifier
);
593 * bdrv_detach_aio_context:
595 * May be called from .bdrv_detach_aio_context() to detach children from the
596 * current #AioContext. This is only needed by block drivers that manage their
597 * own children. Both ->file and ->backing are automatically handled and
598 * block drivers should not call this function on them explicitly.
600 void bdrv_detach_aio_context(BlockDriverState
*bs
);
603 * bdrv_attach_aio_context:
605 * May be called from .bdrv_attach_aio_context() to attach children to the new
606 * #AioContext. This is only needed by block drivers that manage their own
607 * children. Both ->file and ->backing are automatically handled and block
608 * drivers should not call this function on them explicitly.
610 void bdrv_attach_aio_context(BlockDriverState
*bs
,
611 AioContext
*new_context
);
614 * bdrv_add_aio_context_notifier:
616 * If a long-running job intends to be always run in the same AioContext as a
617 * certain BDS, it may use this function to be notified of changes regarding the
618 * association of the BDS to an AioContext.
620 * attached_aio_context() is called after the target BDS has been attached to a
621 * new AioContext; detach_aio_context() is called before the target BDS is being
622 * detached from its old AioContext.
624 void bdrv_add_aio_context_notifier(BlockDriverState
*bs
,
625 void (*attached_aio_context
)(AioContext
*new_context
, void *opaque
),
626 void (*detach_aio_context
)(void *opaque
), void *opaque
);
629 * bdrv_remove_aio_context_notifier:
631 * Unsubscribe of change notifications regarding the BDS's AioContext. The
632 * parameters given here have to be the same as those given to
633 * bdrv_add_aio_context_notifier().
635 void bdrv_remove_aio_context_notifier(BlockDriverState
*bs
,
636 void (*aio_context_attached
)(AioContext
*,
638 void (*aio_context_detached
)(void *),
/* Return non-zero if @filename names a Windows drive (e.g. "c:"); used to
 * distinguish drive letters from protocol prefixes when parsing filenames. */
int is_windows_drive(const char *filename);
647 * @job_id: The id of the newly-created job, or %NULL to use the
648 * device name of @bs.
649 * @bs: Block device to operate on.
650 * @base: Block device that will become the new base, or %NULL to
651 * flatten the whole backing file chain onto @bs.
652 * @backing_file_str: The file name that will be written to @bs as the
653 * the new backing file if the job completes. Ignored if @base is %NULL.
654 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
655 * @on_error: The action to take upon error.
656 * @cb: Completion function for the job.
657 * @opaque: Opaque pointer value passed to @cb.
658 * @errp: Error object.
660 * Start a streaming operation on @bs. Clusters that are unallocated
661 * in @bs, but allocated in any image between @base and @bs (both
662 * exclusive) will be written to @bs. At the end of a successful
663 * streaming job, the backing file of @bs will be changed to
664 * @backing_file_str in the written image and to @base in the live
667 void stream_start(const char *job_id
, BlockDriverState
*bs
,
668 BlockDriverState
*base
, const char *backing_file_str
,
669 int64_t speed
, BlockdevOnError on_error
,
670 BlockCompletionFunc
*cb
, void *opaque
, Error
**errp
);
674 * @job_id: The id of the newly-created job, or %NULL to use the
675 * device name of @bs.
676 * @bs: Active block device.
677 * @top: Top block device to be committed.
678 * @base: Block device that will be written into, and become the new top.
679 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
680 * @on_error: The action to take upon error.
681 * @cb: Completion function for the job.
682 * @opaque: Opaque pointer value passed to @cb.
683 * @backing_file_str: String to use as the backing file in @top's overlay
684 * @errp: Error object.
687 void commit_start(const char *job_id
, BlockDriverState
*bs
,
688 BlockDriverState
*base
, BlockDriverState
*top
, int64_t speed
,
689 BlockdevOnError on_error
, BlockCompletionFunc
*cb
,
690 void *opaque
, const char *backing_file_str
, Error
**errp
);
692 * commit_active_start:
693 * @job_id: The id of the newly-created job, or %NULL to use the
694 * device name of @bs.
695 * @bs: Active block device to be committed.
696 * @base: Block device that will be written into, and become the new top.
697 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
698 * @on_error: The action to take upon error.
699 * @cb: Completion function for the job.
700 * @opaque: Opaque pointer value passed to @cb.
701 * @errp: Error object.
704 void commit_active_start(const char *job_id
, BlockDriverState
*bs
,
705 BlockDriverState
*base
, int64_t speed
,
706 BlockdevOnError on_error
,
707 BlockCompletionFunc
*cb
,
708 void *opaque
, Error
**errp
);
711 * @job_id: The id of the newly-created job, or %NULL to use the
712 * device name of @bs.
713 * @bs: Block device to operate on.
714 * @target: Block device to write to.
715 * @replaces: Block graph node name to replace once the mirror is done. Can
716 * only be used when full mirroring is selected.
717 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
718 * @granularity: The chosen granularity for the dirty bitmap.
719 * @buf_size: The amount of data that can be in flight at one time.
720 * @mode: Whether to collapse all images in the chain to the target.
721 * @backing_mode: How to establish the target's backing chain after completion.
722 * @on_source_error: The action to take upon error reading from the source.
723 * @on_target_error: The action to take upon error writing to the target.
724 * @unmap: Whether to unmap target where source sectors only contain zeroes.
725 * @cb: Completion function for the job.
726 * @opaque: Opaque pointer value passed to @cb.
727 * @errp: Error object.
729 * Start a mirroring operation on @bs. Clusters that are allocated
730 * in @bs will be written to @bs until the job is cancelled or
731 * manually completed. At the end of a successful mirroring job,
732 * @bs will be switched to read from @target.
734 void mirror_start(const char *job_id
, BlockDriverState
*bs
,
735 BlockDriverState
*target
, const char *replaces
,
736 int64_t speed
, uint32_t granularity
, int64_t buf_size
,
737 MirrorSyncMode mode
, BlockMirrorBackingMode backing_mode
,
738 BlockdevOnError on_source_error
,
739 BlockdevOnError on_target_error
,
741 BlockCompletionFunc
*cb
,
742 void *opaque
, Error
**errp
);
746 * @job_id: The id of the newly-created job, or %NULL to use the
747 * device name of @bs.
748 * @bs: Block device to operate on.
749 * @target: Block device to write to.
750 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
751 * @sync_mode: What parts of the disk image should be copied to the destination.
752 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
753 * @on_source_error: The action to take upon error reading from the source.
754 * @on_target_error: The action to take upon error writing to the target.
755 * @cb: Completion function for the job.
756 * @opaque: Opaque pointer value passed to @cb.
757 * @txn: Transaction that this job is part of (may be NULL).
759 * Start a backup operation on @bs. Clusters in @bs are written to @target
760 * until the job is cancelled or manually completed.
762 void backup_start(const char *job_id
, BlockDriverState
*bs
,
763 BlockDriverState
*target
, int64_t speed
,
764 MirrorSyncMode sync_mode
, BdrvDirtyBitmap
*sync_bitmap
,
765 BlockdevOnError on_source_error
,
766 BlockdevOnError on_target_error
,
767 BlockCompletionFunc
*cb
, void *opaque
,
768 BlockJobTxn
*txn
, Error
**errp
);
770 void hmp_drive_add_node(Monitor
*mon
, const char *optstr
);
772 BdrvChild
*bdrv_root_attach_child(BlockDriverState
*child_bs
,
773 const char *child_name
,
774 const BdrvChildRole
*child_role
,
776 void bdrv_root_unref_child(BdrvChild
*child
);
778 const char *bdrv_get_parent_name(const BlockDriverState
*bs
);
779 void blk_dev_change_media_cb(BlockBackend
*blk
, bool load
);
780 bool blk_dev_has_removable_media(BlockBackend
*blk
);
781 bool blk_dev_has_tray(BlockBackend
*blk
);
782 void blk_dev_eject_request(BlockBackend
*blk
, bool force
);
783 bool blk_dev_is_tray_open(BlockBackend
*blk
);
784 bool blk_dev_is_medium_locked(BlockBackend
*blk
);
786 void bdrv_set_dirty(BlockDriverState
*bs
, int64_t cur_sector
, int64_t nr_sect
);
787 bool bdrv_requests_pending(BlockDriverState
*bs
);
789 void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap
*bitmap
, HBitmap
**out
);
790 void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap
*bitmap
, HBitmap
*in
);
792 void blockdev_close_all_bdrv_states(void);
794 #endif /* BLOCK_INT_H */