/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H
#include "block/accounting.h"
#include "block/block.h"
#include "block/throttle-groups.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"
#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_COMPAT6          4
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512
enum BdrvTrackedRequestType {
    BDRV_TRACKED_READ,
    BDRV_TRACKED_WRITE,
    BDRV_TRACKED_FLUSH,
    BDRV_TRACKED_IOCTL,
    BDRV_TRACKED_DISCARD,
};

typedef struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t offset;
    unsigned int bytes;
    enum BdrvTrackedRequestType type;

    bool serialising;
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;
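/*
 * Illustrative sketch (not part of this header): request serialisation works
 * on the [overlap_offset, overlap_offset + overlap_bytes) ranges recorded
 * above.  A helper such as the hypothetical one below is enough to decide
 * whether two tracked requests conflict; the real serialisation logic lives
 * in block/io.c.
 *
 *     static bool tracked_requests_conflict(const BdrvTrackedRequest *a,
 *                                           const BdrvTrackedRequest *b)
 *     {
 *         // half-open interval intersection test
 *         return a->overlap_offset < b->overlap_offset + b->overlap_bytes &&
 *                b->overlap_offset < a->overlap_offset + a->overlap_bytes;
 *     }
 */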
struct BlockDriver {
    const char *format_name;
    int instance_size;

    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* for snapshots, block filters like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);
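    /*
     * Example (illustrative sketch, not part of this header): a typical
     * .bdrv_probe implementation scores the candidate image by looking for
     * the format's magic bytes; the driver returning the highest score wins.
     * "myfmt" and its magic string are hypothetical.
     *
     *     static int myfmt_probe(const uint8_t *buf, int buf_size,
     *                            const char *filename)
     *     {
     *         // strong match if the image starts with our magic string
     *         if (buf_size >= 4 && memcmp(buf, "MYFM", 4) == 0) {
     *             return 100;
     *         }
     *         return 0;
     *     }
     */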
    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
     * this field set to true, except ones that are defined only by their
     * child's bs.
     * An example of the latter type is the quorum block driver.
     */
    bool bdrv_needs_filename;

    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors);
    int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors);
    void (*bdrv_close)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);
    /* aio */
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    /*
     * Efficiently zero a region of the disk image.  Typically an image format
     * would use a compact metadata representation to implement this.  This
     * function pointer may be NULL and .bdrv_co_writev() will be called
     * instead.
     */
    int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum);
    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);
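    /*
     * Sketch of a .bdrv_co_flush_to_disk implementation for a protocol driver
     * backed by a file descriptor (BDRVMyfmtState and its fd member are
     * assumptions made for the example, not real QEMU code):
     *
     *     typedef struct BDRVMyfmtState { int fd; } BDRVMyfmtState;
     *
     *     static int coroutine_fn myfmt_co_flush_to_disk(BlockDriverState *bs)
     *     {
     *         BDRVMyfmtState *s = bs->opaque;
     *
     *         // push OS-buffered data down to the physical disk
     *         if (qemu_fdatasync(s->fd) < 0) {
     *             return -errno;
     *         }
     *         return 0;
     *     }
     */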
    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov,
                             int64_t pos);
    int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size);
    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;

    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
                                 const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
                                        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use.  Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again.  Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);
    void (*bdrv_flush_io_queue)(BlockDriverState *bs);

    /**
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
    /**
     * Try to get @bs's geometry (cyls, heads, sectors).
     * On success, store them in @geo and return 0.
     * On failure, return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

    /**
     * Drain and stop any internal sources of requests in the driver, and
     * remain so until next I/O callback (e.g. bdrv_co_writev) is called.
     */
    void (*bdrv_drain)(BlockDriverState *bs);

    QLIST_ENTRY(BlockDriver) list;
};
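/*
 * A minimal sketch of how a format driver fills in this structure and
 * registers itself (all myfmt_* names and BDRVMyfmtState are hypothetical;
 * bdrv_register() and block_init() come from block.h and qemu/module.h):
 *
 *     static BlockDriver bdrv_myfmt = {
 *         .format_name    = "myfmt",
 *         .instance_size  = sizeof(BDRVMyfmtState),
 *         .bdrv_probe     = myfmt_probe,
 *         .bdrv_open      = myfmt_open,
 *         .bdrv_close     = myfmt_close,
 *         .bdrv_co_readv  = myfmt_co_readv,
 *         .bdrv_co_writev = myfmt_co_writev,
 *         .bdrv_getlength = myfmt_getlength,
 *     };
 *
 *     static void bdrv_myfmt_init(void)
 *     {
 *         bdrv_register(&bdrv_myfmt);
 *     }
 *     block_init(bdrv_myfmt_init);
 */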
typedef struct BlockLimits {
    /* maximum number of sectors that can be discarded at once */
    int max_discard;

    /* optimal alignment for discard requests in sectors */
    int64_t discard_alignment;

    /* maximum number of sectors that can be zeroized at once */
    int max_write_zeroes;

    /* optimal alignment for write zeroes requests in sectors */
    int64_t write_zeroes_alignment;

    /* optimal transfer length in sectors */
    int opt_transfer_length;

    /* maximal transfer length in sectors */
    int max_transfer_length;

    /* memory alignment so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment for bounce buffer */
    size_t opt_mem_alignment;

    /* maximum number of iovec elements */
    int max_iov;
} BlockLimits;
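/*
 * Sketch of how a driver could advertise its limits from a
 * .bdrv_refresh_limits callback (the 64k cluster size is an arbitrary
 * example; myfmt_* is hypothetical):
 *
 *     static void myfmt_refresh_limits(BlockDriverState *bs, Error **errp)
 *     {
 *         // zeroing and transfers are cheapest in whole 64k clusters
 *         bs->bl.write_zeroes_alignment = 65536 >> BDRV_SECTOR_BITS;
 *         bs->bl.opt_transfer_length    = 65536 >> BDRV_SECTOR_BITS;
 *         // requests need no bounce buffer if 4k-aligned in memory
 *         bs->bl.opt_mem_alignment      = 4096;
 *     }
 */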
typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;

struct BdrvChildRole {
    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);
};

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;

struct BdrvChild {
    BlockDriverState *bs;
    char *name;
    const BdrvChildRole *role;
    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;
};

/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int read_only; /* if true, the media is read only */
    int open_flags; /* flags used to open the file, re-used for re-open */
    int encrypted; /* if true, the media is encrypted */
    int valid_key; /* if true, a valid encryption key has been set */
    int sg;        /* if true, the device is a /dev/sg* */
    int copy_on_read; /* if true, copy read backing sectors into image;
                         note this is a reference count */
    bool probed;

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    BlockBackend *blk;          /* owning backend, if any */

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non-zero, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    BdrvChild *backing;
    BdrvChild *file;

    NotifierList close_notifiers;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* I/O throttling.
     * throttle_state tells us if this BDS has I/O limits configured.
     * io_limits_enabled tells us if they are currently being enforced,
     * but it can be temporarily set to false */
    CoQueue throttled_reqs[2];
    bool io_limits_enabled;
    /* The following fields are protected by the ThrottleGroup lock.
     * See the ThrottleGroup documentation for details. */
    ThrottleState *throttle_state;
    ThrottleTimers throttle_timers;
    unsigned pending_reqs[2];
    QLIST_ENTRY(BlockDriverState) round_robin;

    /* Offset after the highest byte written to */
    uint64_t wr_highest_offset;

    /* I/O Limits */
    BlockLimits bl;

    /* Whether the device produces zeros when reading beyond EOF */
    bool zero_beyond_eof;

    /* Alignment requirement for offset/length of I/O requests */
    unsigned int request_alignment;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of "drives" the guest sees */
    QTAILQ_ENTRY(BlockDriverState) device_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
    int refcnt;

    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always
     * a parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *options;
    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* threshold limit for writes, in bytes. "High water mark". */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    int quiesce_counter;
};
struct BlockBackendRootState {
    int open_flags;
    bool read_only;
    BlockdevDetectZeroesOptions detect_zeroes;

    char *throttle_group;
    ThrottleState *throttle_state;
};

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}
/* Essential block drivers which must always be statically linked into qemu, and
 * which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;

extern QTAILQ_HEAD(BdrvStates, BlockDriverState) bdrv_states;

/**
 * bdrv_setup_io_funcs:
 *
 * Prepare a #BlockDriver for I/O request processing by populating
 * unimplemented coroutine and AIO interfaces with generic wrapper functions
 * that fall back to implemented interfaces.
 */
void bdrv_setup_io_funcs(BlockDriver *bdrv);
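/*
 * Hedged example: a driver that only provides .bdrv_co_readv/.bdrv_co_writev
 * can still be used through the AIO interfaces once this function has run
 * over it (the driver and its callbacks are hypothetical):
 *
 *     static BlockDriver bdrv_myfmt = {
 *         .format_name    = "myfmt",
 *         .bdrv_co_readv  = myfmt_co_readv,
 *         .bdrv_co_writev = myfmt_co_writev,
 *     };
 *
 *     bdrv_setup_io_funcs(&bdrv_myfmt);
 *     // the unimplemented AIO callbacks now point at generic wrappers that
 *     // fall back to the coroutine implementations above
 */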
int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);

void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg);


/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);
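/*
 * Usage sketch (hypothetical names; it is an assumption of this example,
 * based on how the write-threshold code uses the hook, that the notifier
 * data is the BdrvTrackedRequest being processed):
 *
 *     static int my_before_write(NotifierWithReturn *notifier, void *opaque)
 *     {
 *         BdrvTrackedRequest *req = opaque;
 *
 *         // inspect req->offset / req->bytes here; returning a negative
 *         // errno fails the write, returning 0 lets it proceed
 *         return 0;
 *     }
 *
 *     static NotifierWithReturn my_write_notifier = {
 *         .notify = my_before_write,
 *     };
 *
 *     bdrv_add_before_write_notifier(bs, &my_write_notifier);
 */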
/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext.  This is only needed by block drivers that manage their
 * own children.  Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext.  This is only needed by block drivers that manage their own
 * children.  Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);
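/*
 * Sketch for a driver that manages a child beyond ->file/->backing (all
 * names below are hypothetical):
 *
 *     static void myfilter_detach_aio_context(BlockDriverState *bs)
 *     {
 *         BDRVMyFilterState *s = bs->opaque;
 *
 *         // ->file and ->backing are handled by the core; only the extra
 *         // child needs to be detached here
 *         bdrv_detach_aio_context(s->data_child->bs);
 *     }
 *
 *     static void myfilter_attach_aio_context(BlockDriverState *bs,
 *                                             AioContext *new_context)
 *     {
 *         BDRVMyFilterState *s = bs->opaque;
 *
 *         bdrv_attach_aio_context(s->data_child->bs, new_context);
 *     }
 */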
/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to be always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding
 * the association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to
 * a new AioContext; detach_aio_context() is called before the target BDS is
 * detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext.  The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);
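/*
 * Usage sketch for a long-running job (callback names and the job variable
 * are hypothetical):
 *
 *     static void my_job_attached_aio_context(AioContext *new_context,
 *                                             void *opaque)
 *     {
 *         // re-create timers/bottom halves in new_context
 *     }
 *
 *     static void my_job_detach_aio_context(void *opaque)
 *     {
 *         // quiesce and free anything bound to the old context
 *     }
 *
 *     bdrv_add_aio_context_notifier(bs, my_job_attached_aio_context,
 *                                   my_job_detach_aio_context, job);
 *     // ... and later, with exactly the same arguments:
 *     bdrv_remove_aio_context_notifier(bs, my_job_attached_aio_context,
 *                                      my_job_detach_aio_context, job);
 */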
#ifdef _WIN32
int is_windows_drive(const char *filename);
#endif

/**
 * stream_start:
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @base_id: The file name that will be written to @bs as the new
 * backing file if the job completes.  Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs.  Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs.  At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @base_id in the written image and to @base in the live BlockDriverState.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);
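/*
 * Call sketch (bs, base_bs, my_stream_cb and job_opaque are placeholders):
 *
 *     Error *local_err = NULL;
 *
 *     stream_start(bs, base_bs, "base.qcow2",
 *                  0,                          // no speed limit
 *                  BLOCKDEV_ON_ERROR_REPORT,
 *                  my_stream_cb, job_opaque, &local_err);
 */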
/**
 * commit_start:
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay.
 * @errp: Error object.
 */
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp);
/**
 * commit_active_start:
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 */
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp);
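/*
 * Call sketch (placeholder variables; the backing file string is what will
 * be recorded in the overlay of @top):
 *
 *     commit_start(bs, base_bs, top_bs,
 *                  0,                          // no speed limit
 *                  BLOCKDEV_ON_ERROR_REPORT,
 *                  my_commit_cb, job_opaque,
 *                  "base.qcow2", &local_err);
 */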
/**
 * mirror_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs.  Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed.  At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);
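/*
 * Call sketch (placeholder variables; the 64k granularity and default buffer
 * size are arbitrary choices for the example):
 *
 *     mirror_start(bs, target_bs,
 *                  NULL,                       // do not replace another node
 *                  0,                          // no speed limit
 *                  65536,                      // dirty bitmap granularity
 *                  0,                          // default buf_size
 *                  MIRROR_SYNC_MODE_FULL,
 *                  BLOCKDEV_ON_ERROR_REPORT,   // on_source_error
 *                  BLOCKDEV_ON_ERROR_REPORT,   // on_target_error
 *                  true,                       // unmap zeroes on the target
 *                  my_mirror_cb, job_opaque, &local_err);
 */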
/**
 * backup_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 *
 * Start a backup operation on @bs.  Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp);
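/*
 * Call sketch (placeholder variables; a full backup with no dirty bitmap and
 * no transaction):
 *
 *     backup_start(bs, target_bs,
 *                  0,                          // no speed limit
 *                  MIRROR_SYNC_MODE_FULL,
 *                  NULL,                       // sync_bitmap unused for FULL
 *                  BLOCKDEV_ON_ERROR_REPORT,   // on_source_error
 *                  BLOCKDEV_ON_ERROR_REPORT,   // on_target_error
 *                  my_backup_cb, job_opaque,
 *                  NULL,                       // txn
 *                  &local_err);
 */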
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs);

void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);
void blk_dev_resize_cb(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
bool bdrv_requests_pending(BlockDriverState *bs);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);

#endif /* BLOCK_INT_H */