hw: Replace trivial drive_get_next() by drive_get()
[qemu.git] / include/block/block_int.h
blob f4c75e8ba956b4486e9155bb9be1efbd2aeaca49
1 /*
2 * QEMU System Emulator block driver
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
24 #ifndef BLOCK_INT_H
25 #define BLOCK_INT_H
27 #include "block/accounting.h"
28 #include "block/block.h"
29 #include "block/aio-wait.h"
30 #include "qemu/queue.h"
31 #include "qemu/coroutine.h"
32 #include "qemu/stats64.h"
33 #include "qemu/timer.h"
34 #include "qemu/hbitmap.h"
35 #include "block/snapshot.h"
36 #include "qemu/throttle.h"
37 #include "qemu/rcu.h"
39 #define BLOCK_FLAG_LAZY_REFCOUNTS 8
41 #define BLOCK_OPT_SIZE "size"
42 #define BLOCK_OPT_ENCRYPT "encryption"
43 #define BLOCK_OPT_ENCRYPT_FORMAT "encrypt.format"
44 #define BLOCK_OPT_COMPAT6 "compat6"
45 #define BLOCK_OPT_HWVERSION "hwversion"
46 #define BLOCK_OPT_BACKING_FILE "backing_file"
47 #define BLOCK_OPT_BACKING_FMT "backing_fmt"
48 #define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
49 #define BLOCK_OPT_TABLE_SIZE "table_size"
50 #define BLOCK_OPT_PREALLOC "preallocation"
51 #define BLOCK_OPT_SUBFMT "subformat"
52 #define BLOCK_OPT_COMPAT_LEVEL "compat"
53 #define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
54 #define BLOCK_OPT_ADAPTER_TYPE "adapter_type"
55 #define BLOCK_OPT_REDUNDANCY "redundancy"
56 #define BLOCK_OPT_NOCOW "nocow"
57 #define BLOCK_OPT_EXTENT_SIZE_HINT "extent_size_hint"
58 #define BLOCK_OPT_OBJECT_SIZE "object_size"
59 #define BLOCK_OPT_REFCOUNT_BITS "refcount_bits"
60 #define BLOCK_OPT_DATA_FILE "data_file"
61 #define BLOCK_OPT_DATA_FILE_RAW "data_file_raw"
62 #define BLOCK_OPT_COMPRESSION_TYPE "compression_type"
63 #define BLOCK_OPT_EXTL2 "extended_l2"
65 #define BLOCK_PROBE_BUF_SIZE 512
67 enum BdrvTrackedRequestType {
68 BDRV_TRACKED_READ,
69 BDRV_TRACKED_WRITE,
70 BDRV_TRACKED_DISCARD,
71 BDRV_TRACKED_TRUNCATE,
75 * It is unfortunate that the BdrvTrackedRequest structure is public,
76 * as block/io.c is very careful about incoming offset/bytes being
77 * correct. Be sure to assert that bdrv_check_request() succeeded after any
78 * modification of a BdrvTrackedRequest object outside of block/io.c.
80 typedef struct BdrvTrackedRequest {
81 BlockDriverState *bs;
82 int64_t offset;
83 int64_t bytes;
84 enum BdrvTrackedRequestType type;
86 bool serialising;
87 int64_t overlap_offset;
88 int64_t overlap_bytes;
90 QLIST_ENTRY(BdrvTrackedRequest) list;
91 Coroutine *co; /* owner, used for deadlock detection */
92 CoQueue wait_queue; /* coroutines blocked on this request */
94 struct BdrvTrackedRequest *waiting_for;
95 } BdrvTrackedRequest;
97 int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
98 QEMUIOVector *qiov, size_t qiov_offset,
99 Error **errp);
100 int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp);
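/*
 * Illustrative sketch (not part of the original header): per the note above,
 * code outside block/io.c that adjusts a tracked request should re-validate
 * it afterwards. "req" and "new_bytes" below are hypothetical names:
 *
 *     req->bytes = new_bytes;
 *     assert(bdrv_check_request(req->offset, req->bytes, NULL) == 0);
 */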
102 struct BlockDriver {
103 const char *format_name;
104 int instance_size;
106 /* set to true if the BlockDriver is a block filter. Block filters pass
107 * certain callbacks that refer to data (see block.c) to their bs->file
108 * or bs->backing (whichever one exists) if the driver doesn't implement
109 * them. Drivers that do not wish to forward must implement them and return
110 * -ENOTSUP.
111 * Note that filters are not allowed to modify data.
113 * Filters generally cannot have more than a single filtered child,
114 * because the data they present must at all times be the same as
115 * that on their filtered child. That would be impossible to
116 * achieve for multiple filtered children.
117 * (And this filtered child must then be bs->file or bs->backing.)
119 bool is_filter;
121 * Set to true if the BlockDriver is a format driver. Format nodes
122 * generally do not expect their children to be other format nodes
123 * (except for backing files), and so format probing is disabled
124 * on those children.
126 bool is_format;
128 * Return true if @to_replace can be replaced by a BDS with the
129 * same data as @bs without it affecting @bs's behavior (that is,
130 * without it being visible to @bs's parents).
132 bool (*bdrv_recurse_can_replace)(BlockDriverState *bs,
133 BlockDriverState *to_replace);
135 int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
136 int (*bdrv_probe_device)(const char *filename);
138 /* Any driver implementing this callback is expected to be able to handle
139 * NULL file names in its .bdrv_open() implementation */
140 void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
141 /* Drivers implementing neither bdrv_parse_filename nor bdrv_open should have
142 * this field set to true, except ones that are defined only by their
143 * child's bs.
144 * An example of the latter type is the quorum block driver.
146 bool bdrv_needs_filename;
149 * Set if a driver can support backing files. This also implies the
150 * following semantics:
152 * - A return status of 0 from .bdrv_co_block_status means that the
153 * corresponding blocks are not allocated in this layer of the backing chain
154 * - For such (unallocated) blocks, read will:
155 * - fill buffer with zeros if there is no backing file
156 * - read from the backing file otherwise, where the block layer
157 * takes care of reading zeros beyond EOF if backing file is short
159 bool supports_backing;
161 /* For handling image reopen for split or non-split files */
162 int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
163 BlockReopenQueue *queue, Error **errp);
164 void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
165 void (*bdrv_reopen_commit_post)(BDRVReopenState *reopen_state);
166 void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
167 void (*bdrv_join_options)(QDict *options, QDict *old_options);
169 int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
170 Error **errp);
172 /* Protocol drivers should implement this instead of bdrv_open */
173 int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
174 Error **errp);
175 void (*bdrv_close)(BlockDriverState *bs);
178 int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts,
179 Error **errp);
180 int coroutine_fn (*bdrv_co_create_opts)(BlockDriver *drv,
181 const char *filename,
182 QemuOpts *opts,
183 Error **errp);
185 int coroutine_fn (*bdrv_co_amend)(BlockDriverState *bs,
186 BlockdevAmendOptions *opts,
187 bool force,
188 Error **errp);
190 int (*bdrv_amend_options)(BlockDriverState *bs,
191 QemuOpts *opts,
192 BlockDriverAmendStatusCB *status_cb,
193 void *cb_opaque,
194 bool force,
195 Error **errp);
197 int (*bdrv_make_empty)(BlockDriverState *bs);
200 * Refreshes the bs->exact_filename field. If that is impossible,
201 * bs->exact_filename has to be left empty.
203 void (*bdrv_refresh_filename)(BlockDriverState *bs);
206 * Gathers the open options for all children into @target.
207 * A simple format driver (without backing file support) might
208 * implement this function like this:
210 * QINCREF(bs->file->bs->full_open_options);
211 * qdict_put(target, "file", bs->file->bs->full_open_options);
213 * If not specified, the generic implementation will simply put
214 * all children's options under their respective name.
216 * @backing_overridden is true when bs->backing seems not to be
217 * the child that would result from opening bs->backing_file.
218 * Therefore, if it is true, the backing child's options should be
219 * gathered; otherwise, there is no need since the backing child
220 * is the one implied by the image header.
222 * Note that ideally this function would not be needed. Every
223 * block driver which implements it is probably doing something
224 * shady regarding its runtime option structure.
226 void (*bdrv_gather_child_options)(BlockDriverState *bs, QDict *target,
227 bool backing_overridden);
230 * Returns an allocated string which is the directory name of this BDS: It
231 * will be used to make relative filenames absolute by prepending this
232 * function's return value to them.
234 char *(*bdrv_dirname)(BlockDriverState *bs, Error **errp);
236 /* aio */
237 BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs,
238 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
239 BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
240 BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs,
241 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
242 BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
243 BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
244 BlockCompletionFunc *cb, void *opaque);
245 BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
246 int64_t offset, int bytes,
247 BlockCompletionFunc *cb, void *opaque);
249 int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
250 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
253 * @offset: position in bytes to read at
254 * @bytes: number of bytes to read
255 * @qiov: the buffers to fill with read data
256 * @flags: currently unused, always 0
258 * @offset and @bytes will be a multiple of 'request_alignment',
259 * but the length of individual @qiov elements does not have to
260 * be a multiple.
262 * @bytes will always equal the total size of @qiov, and will be
263 * no larger than 'max_transfer'.
265 * The buffer in @qiov may point directly to guest memory.
267 int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
268 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
269 BdrvRequestFlags flags);
270 int coroutine_fn (*bdrv_co_preadv_part)(BlockDriverState *bs,
271 int64_t offset, int64_t bytes,
272 QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
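/*
 * Illustrative sketch (not from the original header): a simple pass-through
 * filter could implement .bdrv_co_preadv by forwarding to its file child.
 * The driver name my_filter_co_preadv below is hypothetical:
 *
 *     static int coroutine_fn
 *     my_filter_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
 *                         QEMUIOVector *qiov, BdrvRequestFlags flags)
 *     {
 *         return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
 *     }
 */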
273 int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
274 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
276 * @offset: position in bytes to write at
277 * @bytes: number of bytes to write
278 * @qiov: the buffers containing data to write
279 * @flags: zero or more bits allowed by 'supported_write_flags'
281 * @offset and @bytes will be a multiple of 'request_alignment',
282 * but the length of individual @qiov elements does not have to
283 * be a multiple.
285 * @bytes will always equal the total size of @qiov, and will be
286 * no larger than 'max_transfer'.
288 * The buffer in @qiov may point directly to guest memory.
290 int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
291 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
292 BdrvRequestFlags flags);
293 int coroutine_fn (*bdrv_co_pwritev_part)(BlockDriverState *bs,
294 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
295 BdrvRequestFlags flags);
298 * Efficiently zero a region of the disk image. Typically an image format
299 * would use a compact metadata representation to implement this. This
300 * function pointer may be NULL or return -ENOTSUP, in which case
301 * .bdrv_co_writev() will be called instead.
303 int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
304 int64_t offset, int64_t bytes, BdrvRequestFlags flags);
305 int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
306 int64_t offset, int64_t bytes);
308 /* Map [offset, offset + nbytes) range onto a child of @bs to copy from,
309 * and invoke bdrv_co_copy_range_from(child, ...), or invoke
310 * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
312 * See the comment of bdrv_co_copy_range for the parameter and return value
313 * semantics.
315 int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs,
316 BdrvChild *src,
317 int64_t offset,
318 BdrvChild *dst,
319 int64_t dst_offset,
320 int64_t bytes,
321 BdrvRequestFlags read_flags,
322 BdrvRequestFlags write_flags);
324 /* Map [offset, offset + nbytes) range onto a child of bs to copy data to,
325 * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
326 * operation if @bs is the leaf and @src has the same BlockDriver. Return
327 * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
329 * See the comment of bdrv_co_copy_range for the parameter and return value
330 * semantics.
332 int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs,
333 BdrvChild *src,
334 int64_t src_offset,
335 BdrvChild *dst,
336 int64_t dst_offset,
337 int64_t bytes,
338 BdrvRequestFlags read_flags,
339 BdrvRequestFlags write_flags);
342 * Building block for bdrv_block_status[_above] and
343 * bdrv_is_allocated[_above]. The driver should answer only
344 * according to the current layer, and should only need to set
345 * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
346 * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
347 * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
348 * block.h for the overall meaning of the bits. As a hint, the
349 * flag want_zero is true if the caller cares more about precise
350 * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
351 * overall allocation (favor larger *pnum, perhaps by reporting
352 * _DATA instead of _ZERO). The block layer guarantees input
353 * clamped to bdrv_getlength() and aligned to request_alignment,
354 * as well as non-NULL pnum, map, and file; in turn, the driver
355 * must return an error or set pnum to an aligned non-zero value.
357 * Note that @bytes is just a hint on how big a region the
358 * caller wants to inspect. It is not a limit on *pnum.
359 * Implementations are free to return larger values of *pnum if
360 * doing so does not incur a performance penalty.
362 * block/io.c's bdrv_co_block_status() will utilize an unclamped
363 * *pnum value for the block-status cache on protocol nodes, prior
364 * to clamping *pnum for return to its caller.
366 int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
367 bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
368 int64_t *map, BlockDriverState **file);
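/*
 * Illustrative sketch (hypothetical driver, not from the original header):
 * a raw-style protocol driver that stores guest data 1:1 could report the
 * whole queried range as data at the same offset in itself:
 *
 *     static int coroutine_fn
 *     my_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
 *                        int64_t bytes, int64_t *pnum, int64_t *map,
 *                        BlockDriverState **file)
 *     {
 *         *pnum = bytes;
 *         *map = offset;
 *         *file = bs;
 *         return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
 *     }
 */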
371 * This informs the driver that we are no longer interested in the result
372 * of in-flight requests, so don't waste the time if possible.
374 * One example usage is to avoid waiting for an nbd target node reconnect
375 * timeout during job-cancel with force=true.
377 void (*bdrv_cancel_in_flight)(BlockDriverState *bs);
380 * Invalidate any cached meta-data.
382 void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs,
383 Error **errp);
384 int (*bdrv_inactivate)(BlockDriverState *bs);
387 * Flushes all data for all layers by calling bdrv_co_flush for underlying
388 * layers, if needed. This function is needed for deterministic
389 * synchronization of the flush finishing callback.
391 int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);
393 /* Delete a created file. */
394 int coroutine_fn (*bdrv_co_delete_file)(BlockDriverState *bs,
395 Error **errp);
398 * Flushes all data that was already written to the OS all the way down to
399 * the disk (for example file-posix.c calls fsync()).
401 int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);
404 * Flushes all internal caches to the OS. The data may still sit in a
405 * writeback cache of the host OS, but it will survive a crash of the qemu
406 * process.
408 int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);
411 * Drivers setting this field must be able to work with just a plain
412 * filename with '<protocol_name>:' as a prefix, and no other options.
413 * Options may be extracted from the filename by implementing
414 * bdrv_parse_filename.
416 const char *protocol_name;
419 * Truncate @bs to @offset bytes using the given @prealloc mode
420 * when growing. Modes other than PREALLOC_MODE_OFF should be
421 * rejected when shrinking @bs.
423 * If @exact is true, @bs must be resized to exactly @offset.
424 * Otherwise, it is sufficient for @bs (if it is a host block
425 * device and thus there is no way to resize it) to be at least
426 * @offset bytes in length.
428 * If @exact is true and this function fails but would succeed
429 * with @exact = false, it should return -ENOTSUP.
431 int coroutine_fn (*bdrv_co_truncate)(BlockDriverState *bs, int64_t offset,
432 bool exact, PreallocMode prealloc,
433 BdrvRequestFlags flags, Error **errp);
435 int64_t (*bdrv_getlength)(BlockDriverState *bs);
436 bool has_variable_length;
437 int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
438 BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,
439 Error **errp);
441 int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
442 int64_t offset, int64_t bytes, QEMUIOVector *qiov);
443 int coroutine_fn (*bdrv_co_pwritev_compressed_part)(BlockDriverState *bs,
444 int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);
446 int (*bdrv_snapshot_create)(BlockDriverState *bs,
447 QEMUSnapshotInfo *sn_info);
448 int (*bdrv_snapshot_goto)(BlockDriverState *bs,
449 const char *snapshot_id);
450 int (*bdrv_snapshot_delete)(BlockDriverState *bs,
451 const char *snapshot_id,
452 const char *name,
453 Error **errp);
454 int (*bdrv_snapshot_list)(BlockDriverState *bs,
455 QEMUSnapshotInfo **psn_info);
456 int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
457 const char *snapshot_id,
458 const char *name,
459 Error **errp);
460 int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
461 ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs,
462 Error **errp);
463 BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs);
465 int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
466 QEMUIOVector *qiov,
467 int64_t pos);
468 int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,
469 QEMUIOVector *qiov,
470 int64_t pos);
472 int (*bdrv_change_backing_file)(BlockDriverState *bs,
473 const char *backing_file, const char *backing_fmt);
475 /* removable device specific */
476 bool (*bdrv_is_inserted)(BlockDriverState *bs);
477 void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
478 void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);
480 /* to control generic scsi devices */
481 BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
482 unsigned long int req, void *buf,
483 BlockCompletionFunc *cb, void *opaque);
484 int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs,
485 unsigned long int req, void *buf);
487 /* List of options for creating images, terminated by name == NULL */
488 QemuOptsList *create_opts;
490 /* List of options for image amend */
491 QemuOptsList *amend_opts;
494 * If this driver supports reopening images this contains a
495 * NULL-terminated list of the runtime options that can be
496 * modified. If an option in this list is unspecified during
497 * reopen then it _must_ be reset to its default value or return
498 * an error.
500 const char *const *mutable_opts;
503 * Returns 0 for completed check, -errno for internal errors.
504 * The check results are stored in result.
506 int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs,
507 BdrvCheckResult *result,
508 BdrvCheckMode fix);
510 void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);
512 /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
513 int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
514 const char *tag);
515 int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
516 const char *tag);
517 int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
518 bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
520 void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);
523 * Returns 1 if newly created images are guaranteed to contain only
524 * zeros, 0 otherwise.
526 int (*bdrv_has_zero_init)(BlockDriverState *bs);
528 /* Remove fd handlers, timers, and other event loop callbacks so the event
529 * loop is no longer in use. Called with no in-flight requests and in
530 * depth-first traversal order with parents before child nodes.
532 void (*bdrv_detach_aio_context)(BlockDriverState *bs);
534 /* Add fd handlers, timers, and other event loop callbacks so I/O requests
535 * can be processed again. Called with no in-flight requests and in
536 * depth-first traversal order with child nodes before parent nodes.
538 void (*bdrv_attach_aio_context)(BlockDriverState *bs,
539 AioContext *new_context);
541 /* io queue for linux-aio */
542 void (*bdrv_io_plug)(BlockDriverState *bs);
543 void (*bdrv_io_unplug)(BlockDriverState *bs);
546 * Try to get @bs's logical and physical block size.
547 * On success, store them in @bsz and return zero.
548 * On failure, return negative errno.
550 int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
552 * Try to get @bs's geometry (cyls, heads, sectors)
553 * On success, store them in @geo and return 0.
554 * On failure return -errno.
555 * Only drivers that want to override guest geometry implement this
556 * callback; see hd_geometry_guess().
558 int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);
561 * bdrv_co_drain_begin is called if implemented in the beginning of a
562 * drain operation to drain and stop any internal sources of requests in
563 * the driver.
564 * bdrv_co_drain_end is called if implemented at the end of the drain.
566 * They should be used by the driver to e.g. manage scheduled I/O
567 * requests, or toggle an internal state. After the end of the drain new
568 * requests will continue normally.
570 void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
571 void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);
573 void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
574 Error **errp);
575 void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
576 Error **errp);
579 * Informs the block driver that a permission change is intended. The
580 * driver checks whether the change is permissible and may take other
581 * preparations for the change (e.g. get file system locks). This operation
582 * is always followed by a call to either .bdrv_set_perm or
583 * .bdrv_abort_perm_update.
585 * Checks whether the requested set of cumulative permissions in @perm
586 * can be granted for accessing @bs and whether no other users are using
587 * permissions other than those given in @shared (both arguments take
588 * BLK_PERM_* bitmasks).
590 * If both conditions are met, 0 is returned. Otherwise, -errno is returned
591 * and errp is set to an error describing the conflict.
593 int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
594 uint64_t shared, Error **errp);
597 * Called to inform the driver that the cumulative set of permissions used
598 * on @bs has changed to @perm, and the set of sharable
599 * permissions to @shared. The driver can use this to propagate changes to
600 * its children (i.e. request permissions only if a parent actually needs
601 * them).
603 * This function is only invoked after bdrv_check_perm(), so block drivers
604 * may rely on preparations made in their .bdrv_check_perm implementation.
606 void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared);
609 * Called to inform the driver that after a previous bdrv_check_perm()
610 * call, the permission update is not performed and any preparations made
611 * for it (e.g. taken file locks) need to be undone.
613 * This function can be called even for nodes that never saw a
614 * bdrv_check_perm() call. It is a no-op then.
616 void (*bdrv_abort_perm_update)(BlockDriverState *bs);
619 * Returns in @nperm and @nshared the permissions that the driver for @bs
620 * needs on its child @c, based on the cumulative permissions requested by
621 * the parents in @parent_perm and @parent_shared.
623 * If @c is NULL, return the permissions for attaching a new child for the
624 * given @child_class and @role.
626 * If @reopen_queue is non-NULL, don't return the currently needed
627 * permissions, but those that will be needed after applying the
628 * @reopen_queue.
630 void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c,
631 BdrvChildRole role,
632 BlockReopenQueue *reopen_queue,
633 uint64_t parent_perm, uint64_t parent_shared,
634 uint64_t *nperm, uint64_t *nshared);
636 bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs);
637 bool (*bdrv_co_can_store_new_dirty_bitmap)(BlockDriverState *bs,
638 const char *name,
639 uint32_t granularity,
640 Error **errp);
641 int (*bdrv_co_remove_persistent_dirty_bitmap)(BlockDriverState *bs,
642 const char *name,
643 Error **errp);
646 * Register/unregister a buffer for I/O. For example, a driver may be
647 * interested in knowing the memory areas that will later be used in iovs, so
648 * that it can do IOMMU mapping with VFIO etc., in order to get better
649 * performance. In the case of VFIO drivers, this callback is used to do
650 * DMA mapping for hot buffers.
652 void (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size);
653 void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host);
654 QLIST_ENTRY(BlockDriver) list;
656 /* Pointer to a NULL-terminated array of names of strong options
657 * that can be specified for bdrv_open(). A strong option is one
658 * that changes the data of a BDS.
659 * If this pointer is NULL, the array is considered empty.
660 * "filename" and "driver" are always considered strong. */
661 const char *const *strong_runtime_opts;
664 static inline bool block_driver_can_compress(BlockDriver *drv)
666 return drv->bdrv_co_pwritev_compressed ||
667 drv->bdrv_co_pwritev_compressed_part;
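/*
 * Example usage (illustrative, not from the original header): callers can
 * gate compressed writes on this helper, e.g.:
 *
 *     if (!block_driver_can_compress(bs->drv)) {
 *         return -ENOTSUP;
 *     }
 */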
670 typedef struct BlockLimits {
671 /* Alignment requirement, in bytes, for offset/length of I/O
672 * requests. Must be a power of 2 less than INT_MAX; defaults to
673 * 1 for drivers with modern byte interfaces, and to 512
674 * otherwise. */
675 uint32_t request_alignment;
678 * Maximum number of bytes that can be discarded at once. Must be a multiple
679 * of pdiscard_alignment, but need not be a power of 2. May be 0 if there is
680 * no inherent 64-bit limit.
682 int64_t max_pdiscard;
684 /* Optimal alignment for discard requests in bytes. A power of 2
685 * is best but not mandatory. Must be a multiple of
686 * bl.request_alignment, and must be less than max_pdiscard if
687 * that is set. May be 0 if bl.request_alignment is good enough */
688 uint32_t pdiscard_alignment;
691 * Maximum number of bytes that can be zeroed at once. Must be a multiple of
692 * pwrite_zeroes_alignment. 0 means no limit.
694 int64_t max_pwrite_zeroes;
696 /* Optimal alignment for write zeroes requests in bytes. A power
697 * of 2 is best but not mandatory. Must be a multiple of
698 * bl.request_alignment, and must be less than max_pwrite_zeroes
699 * if that is set. May be 0 if bl.request_alignment is good
700 * enough */
701 uint32_t pwrite_zeroes_alignment;
703 /* Optimal transfer length in bytes. A power of 2 is best but not
704 * mandatory. Must be a multiple of bl.request_alignment, or 0 if
705 * no preferred size */
706 uint32_t opt_transfer;
708 /* Maximal transfer length in bytes. Need not be power of 2, but
709 * must be multiple of opt_transfer and bl.request_alignment, or 0
710 * for no 32-bit limit. For now, anything larger than INT_MAX is
711 * clamped down. */
712 uint32_t max_transfer;
714 /* Maximal hardware transfer length in bytes. Applies whenever
715 * transfers to the device bypass the kernel I/O scheduler, for
716 * example with SG_IO. If larger than max_transfer or if zero,
717 * blk_get_max_hw_transfer will fall back to max_transfer.
719 uint64_t max_hw_transfer;
721 /* Maximal number of scatter/gather elements allowed by the hardware.
722 * Applies whenever transfers to the device bypass the kernel I/O
723 * scheduler, for example with SG_IO. If larger than max_iov
724 * or if zero, blk_get_max_hw_iov will fall back to max_iov.
726 int max_hw_iov;
728 /* memory alignment, in bytes, so that no bounce buffer is needed */
729 size_t min_mem_alignment;
731 /* memory alignment, in bytes, for bounce buffer */
732 size_t opt_mem_alignment;
734 /* maximum number of iovec elements */
735 int max_iov;
736 } BlockLimits;
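/*
 * Illustrative sketch (hypothetical driver): a .bdrv_refresh_limits
 * implementation typically just fills in the relevant bs->bl fields. The
 * 512-byte alignment and 2 MiB transfer cap below are made-up example values:
 *
 *     static void my_refresh_limits(BlockDriverState *bs, Error **errp)
 *     {
 *         bs->bl.request_alignment = 512;
 *         bs->bl.max_transfer = 2 * 1024 * 1024;
 *     }
 */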
738 typedef struct BdrvOpBlocker BdrvOpBlocker;
740 typedef struct BdrvAioNotifier {
741 void (*attached_aio_context)(AioContext *new_context, void *opaque);
742 void (*detach_aio_context)(void *opaque);
744 void *opaque;
745 bool deleted;
747 QLIST_ENTRY(BdrvAioNotifier) list;
748 } BdrvAioNotifier;
750 struct BdrvChildClass {
751 /* If true, bdrv_replace_node() doesn't change the node this BdrvChild
752 * points to. */
753 bool stay_at_node;
755 /* If true, the parent is a BlockDriverState and bdrv_next_all_states()
756 * will return it. This information is used for drain_all, where every node
757 * will be drained separately, so the drain only needs to be propagated to
758 * non-BDS parents. */
759 bool parent_is_bds;
761 void (*inherit_options)(BdrvChildRole role, bool parent_is_format,
762 int *child_flags, QDict *child_options,
763 int parent_flags, QDict *parent_options);
765 void (*change_media)(BdrvChild *child, bool load);
766 void (*resize)(BdrvChild *child);
768 /* Returns a name that is supposedly more useful for human users than the
769 * node name for identifying the node in question (in particular, a BB
770 * name), or NULL if the parent can't provide a better name. */
771 const char *(*get_name)(BdrvChild *child);
773 /* Returns a malloced string that describes the parent of the child for a
774 * human reader. This could be a node-name, BlockBackend name, qdev ID or
775 * QOM path of the device owning the BlockBackend, job type and ID etc. The
776 * caller is responsible for freeing the memory. */
777 char *(*get_parent_desc)(BdrvChild *child);
780 * If this pair of functions is implemented, the parent doesn't issue new
781 * requests after returning from .drained_begin() until .drained_end() is
782 * called.
784 * These functions must not change the graph (and therefore also must not
785 * call aio_poll(), which could change the graph indirectly).
787 * If drained_end() schedules background operations, it must atomically
788 * increment *drained_end_counter for each such operation and atomically
789 * decrement it once the operation has settled.
791 * Note that this can be nested. If drained_begin() was called twice, new
792 * I/O is allowed only after drained_end() was called twice, too.
794 void (*drained_begin)(BdrvChild *child);
795 void (*drained_end)(BdrvChild *child, int *drained_end_counter);
798 * Returns whether the parent has pending requests for the child. This
799 * callback is polled after .drained_begin() has been called until all
800 * activity on the child has stopped.
802 bool (*drained_poll)(BdrvChild *child);
804 /* Notifies the parent that the child has been activated/inactivated (e.g.
805 * when migration is completing) and it can start/stop requesting
806 * permissions and doing I/O on it. */
807 void (*activate)(BdrvChild *child, Error **errp);
808 int (*inactivate)(BdrvChild *child);
810 void (*attach)(BdrvChild *child);
811 void (*detach)(BdrvChild *child);
813 /* Notifies the parent that the filename of its child has changed (e.g.
814 * because the direct child was removed from the backing chain), so that it
815 * can update its reference. */
816 int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
817 const char *filename, Error **errp);
819 bool (*can_set_aio_ctx)(BdrvChild *child, AioContext *ctx,
820 GSList **ignore, Error **errp);
821 void (*set_aio_ctx)(BdrvChild *child, AioContext *ctx, GSList **ignore);
823 AioContext *(*get_parent_aio_context)(BdrvChild *child);
826 extern const BdrvChildClass child_of_bds;
828 struct BdrvChild {
829 BlockDriverState *bs;
830 char *name;
831 const BdrvChildClass *klass;
832 BdrvChildRole role;
833 void *opaque;
836 * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
838 uint64_t perm;
841 * Permissions that can still be granted to other users of @bs while this
842 * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
844 uint64_t shared_perm;
847 * This link is frozen: the child can neither be replaced nor
848 * detached from the parent.
850 bool frozen;
853 * How many times the parent of this child has been drained
854 * (through klass->drained_*).
855 * Usually, this is equal to bs->quiesce_counter (potentially
856 * reduced by bdrv_drain_all_count). It may differ while the
857 * child is entering or leaving a drained section.
859 int parent_quiesce_counter;
861 QLIST_ENTRY(BdrvChild) next;
862 QLIST_ENTRY(BdrvChild) next_parent;
866 * Allows bdrv_co_block_status() to cache one data region for a
867 * protocol node.
869 * @valid: Whether the cache is valid (should be accessed with atomic
870 * functions so this can be reset by RCU readers)
871 * @data_start: Offset where we know (or strongly assume) is data
872 * @data_end: Offset where the data region ends (which is not necessarily
873 * the start of a zeroed region)
875 typedef struct BdrvBlockStatusCache {
876 struct rcu_head rcu;
878 bool valid;
879 int64_t data_start;
880 int64_t data_end;
881 } BdrvBlockStatusCache;
883 struct BlockDriverState {
884 /* Protected by big QEMU lock or read-only after opening. No special
885 * locking needed during I/O...
887 int open_flags; /* flags used to open the file, re-used for re-open */
888 bool encrypted; /* if true, the media is encrypted */
889 bool sg; /* if true, the device is a /dev/sg* */
890 bool probed; /* if true, format was probed rather than specified */
891 bool force_share; /* if true, always allow all shared permissions */
892 bool implicit; /* if true, this filter node was automatically inserted */
894 BlockDriver *drv; /* NULL means no media */
895 void *opaque;
897 AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
898 /* long-running tasks intended to always use the same AioContext as this
899 * BDS may register themselves in this list to be notified of changes
900 * regarding this BDS's context */
901 QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
902 bool walking_aio_notifiers; /* to make removal during iteration safe */
904 char filename[PATH_MAX];
906 * If not empty, this image is a diff in relation to backing_file.
907 * Note that this is the name given in the image header and
908 * therefore may or may not be equal to .backing->bs->filename.
909 * If this field contains a relative path, it is to be resolved
910 * relatively to the overlay's location.
912 char backing_file[PATH_MAX];
914 * The backing filename indicated by the image header. Contrary
915 * to backing_file, if we ever open this file, auto_backing_file
916 * is replaced by the resulting BDS's filename (i.e. after a
917 * bdrv_refresh_filename() run).
919 char auto_backing_file[PATH_MAX];
920 char backing_format[16]; /* if non-zero and backing_file exists */
922 QDict *full_open_options;
923 char exact_filename[PATH_MAX];
925 BdrvChild *backing;
926 BdrvChild *file;
928 /* I/O Limits */
929 BlockLimits bl;
932 * Flags honored during pread
934 unsigned int supported_read_flags;
935 /* Flags honored during pwrite (so far: BDRV_REQ_FUA,
936 * BDRV_REQ_WRITE_UNCHANGED).
937 * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
938 * writes will be issued as normal writes without the flag set.
939 * This is important to note for drivers that do not explicitly
940 * request a WRITE permission for their children and instead take
941 * the same permissions as their parent did (this is commonly what
942 * block filters do). Such drivers have to be aware that the
943 * parent may have taken a WRITE_UNCHANGED permission only and is
944 * issuing such requests. Drivers either must make sure that
945 * these requests do not result in plain WRITE accesses (usually
946 * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
947 * every incoming write request as-is, including potentially that
948 * flag), or they have to explicitly take the WRITE permission for
949 * their children. */
950 unsigned int supported_write_flags;
951 /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
952 * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) */
953 unsigned int supported_zero_flags;
955 * Flags honoured during truncate (so far: BDRV_REQ_ZERO_WRITE).
957 * If BDRV_REQ_ZERO_WRITE is given, the truncate operation must make sure
958 * that any added space reads as all zeros. If this can't be guaranteed,
959 * the operation must fail.
961 unsigned int supported_truncate_flags;
963 /* the following member gives a name to every node on the bs graph. */
964 char node_name[32];
965 /* element of the list of named nodes building the graph */
966 QTAILQ_ENTRY(BlockDriverState) node_list;
967 /* element of the list of all BlockDriverStates (all_bdrv_states) */
968 QTAILQ_ENTRY(BlockDriverState) bs_list;
969 /* element of the list of monitor-owned BDS */
970 QTAILQ_ENTRY(BlockDriverState) monitor_list;
971 int refcnt;
973 /* operation blockers */
974 QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
976 /* The node that this node inherited default options from (and a reopen on
977 * which can affect this node by changing these defaults). This is always a
978 * parent node of this node. */
979 BlockDriverState *inherits_from;
980 QLIST_HEAD(, BdrvChild) children;
981 QLIST_HEAD(, BdrvChild) parents;
983 QDict *options;
984 QDict *explicit_options;
985 BlockdevDetectZeroesOptions detect_zeroes;
987 /* The error object in use for blocking operations on backing_hd */
988 Error *backing_blocker;
990 /* Protected by AioContext lock */
992 /* If we are reading a disk image, give its size in sectors.
993 * Generally read-only; it is written to by load_snapshot and
994 * save_snapshot, but the block layer is quiescent during those.
996 int64_t total_sectors;
998 /* threshold limit for writes, in bytes. "High water mark". */
999 uint64_t write_threshold_offset;
1001 /* Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
1002 * Reading from the list can be done with either the BQL or the
1003 * dirty_bitmap_mutex. Modifying a bitmap only requires
1004 * dirty_bitmap_mutex. */
1005 QemuMutex dirty_bitmap_mutex;
1006 QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
1008 /* Offset after the highest byte written to */
1009 Stat64 wr_highest_offset;
1011 /* If true, copy read backing sectors into image. Can be >1 if more
1012 * than one client has requested copy-on-read. Accessed with atomic
1013 * ops.
1015 int copy_on_read;
1017 /* number of in-flight requests; overall and serialising.
1018 * Accessed with atomic ops.
1020 unsigned int in_flight;
1021 unsigned int serialising_in_flight;
1023 /* counter for nested bdrv_io_plug.
1024 * Accessed with atomic ops.
1026 unsigned io_plugged;
1028 /* do we need to tell the guest if we have a volatile write cache? */
1029 int enable_write_cache;
1031 /* Accessed with atomic ops. */
1032 int quiesce_counter;
1033 int recursive_quiesce_counter;
1035 unsigned int write_gen; /* Current data generation */
1037 /* Protected by reqs_lock. */
1038 CoMutex reqs_lock;
1039 QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
1040 CoQueue flush_queue; /* Serializing flush queue */
1041 bool active_flush_req; /* Flush request in flight? */
1043 /* Only read/written by whoever has set active_flush_req to true. */
1044 unsigned int flushed_gen; /* Flushed write generation */
1046 /* BdrvChild links to this node may never be frozen */
1047 bool never_freeze;
1049 /* Lock for block-status cache RCU writers */
1050 CoMutex bsc_modify_lock;
1051 /* Always non-NULL, but must only be dereferenced under an RCU read guard */
1052 BdrvBlockStatusCache *block_status_cache;
1055 struct BlockBackendRootState {
1056 int open_flags;
1057 BlockdevDetectZeroesOptions detect_zeroes;
1060 typedef enum BlockMirrorBackingMode {
1061 /* Reuse the existing backing chain from the source for the target.
1062 * - sync=full: Set backing BDS to NULL.
1063 * - sync=top: Use source's backing BDS.
1064 * - sync=none: Use source as the backing BDS. */
1065 MIRROR_SOURCE_BACKING_CHAIN,
1067 /* Open the target's backing chain completely anew */
1068 MIRROR_OPEN_BACKING_CHAIN,
1070 /* Do not change the target's backing BDS after job completion */
1071 MIRROR_LEAVE_BACKING_CHAIN,
1072 } BlockMirrorBackingMode;
1075 /* Essential block drivers which must always be statically linked into qemu, and
1076 * which therefore can be accessed without using bdrv_find_format() */
1077 extern BlockDriver bdrv_file;
1078 extern BlockDriver bdrv_raw;
1079 extern BlockDriver bdrv_qcow2;
1081 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1082 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1083 BdrvRequestFlags flags);
1084 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
1085 int64_t offset, int64_t bytes,
1086 QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
1087 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1088 int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1089 BdrvRequestFlags flags);
1090 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
1091 int64_t offset, int64_t bytes,
1092 QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
1094 static inline int coroutine_fn bdrv_co_pread(BdrvChild *child,
1095 int64_t offset, unsigned int bytes, void *buf, BdrvRequestFlags flags)
1097 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
1099 return bdrv_co_preadv(child, offset, bytes, &qiov, flags);
1102 static inline int coroutine_fn bdrv_co_pwrite(BdrvChild *child,
1103 int64_t offset, unsigned int bytes, void *buf, BdrvRequestFlags flags)
1105 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
1107 return bdrv_co_pwritev(child, offset, bytes, &qiov, flags);
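/*
 * Example usage (illustrative): reading a small header from the primary
 * child in coroutine context, assuming bs->file is set:
 *
 *     uint8_t buf[BLOCK_PROBE_BUF_SIZE];
 *     int ret = bdrv_co_pread(bs->file, 0, sizeof(buf), buf, 0);
 *     if (ret < 0) {
 *         return ret;
 *     }
 */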
1110 extern unsigned int bdrv_drain_all_count;
1111 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
1112 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
1114 bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
1115 uint64_t align);
1116 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
1118 int get_tmp_filename(char *filename, int size);
1119 BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
1120 const char *filename);
1122 void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
1123 QDict *options);
1125 bool bdrv_backing_overridden(BlockDriverState *bs);
1129 * bdrv_add_aio_context_notifier:
1131 * If a long-running job intends to be always run in the same AioContext as a
1132 * certain BDS, it may use this function to be notified of changes regarding the
1133 * association of the BDS to an AioContext.
1135 * attached_aio_context() is called after the target BDS has been attached to a
1136 * new AioContext; detach_aio_context() is called before the target BDS is
1137 * detached from its old AioContext.
1139 void bdrv_add_aio_context_notifier(BlockDriverState *bs,
1140 void (*attached_aio_context)(AioContext *new_context, void *opaque),
1141 void (*detach_aio_context)(void *opaque), void *opaque);
1144 * bdrv_remove_aio_context_notifier:
1146 * Unsubscribe from change notifications regarding the BDS's AioContext. The
1147 * parameters given here have to be the same as those given to
1148 * bdrv_add_aio_context_notifier().
1150 void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
1151 void (*aio_context_attached)(AioContext *,
1152 void *),
1153 void (*aio_context_detached)(void *),
1154 void *opaque);
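/*
 * Example (illustrative): a long-running job pinning itself to the BDS's
 * AioContext. job_attached_cb, job_detached_cb and job are hypothetical
 * names standing in for the job's callbacks and state:
 *
 *     bdrv_add_aio_context_notifier(bs, job_attached_cb, job_detached_cb, job);
 *     ...
 *     bdrv_remove_aio_context_notifier(bs, job_attached_cb, job_detached_cb, job);
 */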
1157 * bdrv_wakeup:
1158 * @bs: The BlockDriverState for which an I/O operation has been completed.
1160 * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
1161 * synchronous I/O on a BlockDriverState that is attached to another
1162 * I/O thread, the main thread lets the I/O thread's event loop run,
1163 * waiting for the I/O operation to complete. A bdrv_wakeup will wake
1164 * up the main thread if necessary.
1166 * Manual calls to bdrv_wakeup are rarely necessary, because
1167 * bdrv_dec_in_flight already calls it.
1169 void bdrv_wakeup(BlockDriverState *bs);
1171 #ifdef _WIN32
1172 int is_windows_drive(const char *filename);
1173 #endif
1176 * stream_start:
1177 * @job_id: The id of the newly-created job, or %NULL to use the
1178 * device name of @bs.
1179 * @bs: Block device to operate on.
1180 * @base: Block device that will become the new base, or %NULL to
1181 * flatten the whole backing file chain onto @bs.
1182 * @backing_file_str: The file name that will be written to @bs as the
1183 * new backing file if the job completes. Ignored if @base is %NULL.
1184 * @creation_flags: Flags that control the behavior of the Job lifetime.
1185 * See @BlockJobCreateFlags
1186 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
1187 * @on_error: The action to take upon error.
1188 * @filter_node_name: The node name that should be assigned to the filter
1189 * driver that the stream job inserts into the graph above
1190 * @bs. NULL means that a node name should be autogenerated.
1191 * @errp: Error object.
1193 * Start a streaming operation on @bs. Clusters that are unallocated
1194 * in @bs, but allocated in any image between @base and @bs (both
1195 * exclusive) will be written to @bs. At the end of a successful
1196 * streaming job, the backing file of @bs will be changed to
1197 * @backing_file_str in the written image and to @base in the live
1198 * BlockDriverState.
1200 void stream_start(const char *job_id, BlockDriverState *bs,
1201 BlockDriverState *base, const char *backing_file_str,
1202 BlockDriverState *bottom,
1203 int creation_flags, int64_t speed,
1204 BlockdevOnError on_error,
1205 const char *filter_node_name,
1206 Error **errp);
1209 * commit_start:
1210 * @job_id: The id of the newly-created job, or %NULL to use the
1211 * device name of @bs.
1212 * @bs: Active block device.
1213 * @top: Top block device to be committed.
1214 * @base: Block device that will be written into, and become the new top.
1215 * @creation_flags: Flags that control the behavior of the Job lifetime.
1216 * See @BlockJobCreateFlags
1217 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
1218 * @on_error: The action to take upon error.
1219 * @backing_file_str: String to use as the backing file in @top's overlay
1220 * @filter_node_name: The node name that should be assigned to the filter
1221 * driver that the commit job inserts into the graph above @top. NULL means
1222 * that a node name should be autogenerated.
1223 * @errp: Error object.
1226 void commit_start(const char *job_id, BlockDriverState *bs,
1227 BlockDriverState *base, BlockDriverState *top,
1228 int creation_flags, int64_t speed,
1229 BlockdevOnError on_error, const char *backing_file_str,
1230 const char *filter_node_name, Error **errp);
1232 * commit_active_start:
1233 * @job_id: The id of the newly-created job, or %NULL to use the
1234 * device name of @bs.
1235 * @bs: Active block device to be committed.
1236 * @base: Block device that will be written into, and become the new top.
1237 * @creation_flags: Flags that control the behavior of the Job lifetime.
1238 * See @BlockJobCreateFlags
1239 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
1240 * @on_error: The action to take upon error.
1241 * @filter_node_name: The node name that should be assigned to the filter
1242 * driver that the commit job inserts into the graph above @bs. NULL means that
1243 * a node name should be autogenerated.
1244 * @cb: Completion function for the job.
1245 * @opaque: Opaque pointer value passed to @cb.
1246 * @auto_complete: Auto complete the job.
1247 * @errp: Error object.
1250 BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
1251 BlockDriverState *base, int creation_flags,
1252 int64_t speed, BlockdevOnError on_error,
1253 const char *filter_node_name,
1254 BlockCompletionFunc *cb, void *opaque,
1255 bool auto_complete, Error **errp);
1257 * mirror_start:
1258 * @job_id: The id of the newly-created job, or %NULL to use the
1259 * device name of @bs.
1260 * @bs: Block device to operate on.
1261 * @target: Block device to write to.
1262 * @replaces: Block graph node name to replace once the mirror is done. Can
1263 * only be used when full mirroring is selected.
1264 * @creation_flags: Flags that control the behavior of the Job lifetime.
1265 * See @BlockJobCreateFlags
1266 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
1267 * @granularity: The chosen granularity for the dirty bitmap.
1268 * @buf_size: The amount of data that can be in flight at one time.
1269 * @mode: Whether to collapse all images in the chain to the target.
1270 * @backing_mode: How to establish the target's backing chain after completion.
1271 * @zero_target: Whether the target should be explicitly zero-initialized
1272 * @on_source_error: The action to take upon error reading from the source.
1273 * @on_target_error: The action to take upon error writing to the target.
1274 * @unmap: Whether to unmap target where source sectors only contain zeroes.
1275 * @filter_node_name: The node name that should be assigned to the filter
1276 * driver that the mirror job inserts into the graph above @bs. NULL means that
1277 * a node name should be autogenerated.
1278 * @copy_mode: When to trigger writes to the target.
1279 * @errp: Error object.
1281 * Start a mirroring operation on @bs. Clusters that are allocated
1282 * in @bs will be written to @target until the job is cancelled or
1283 * manually completed. At the end of a successful mirroring job,
1284 * @bs will be switched to read from @target.
1286 void mirror_start(const char *job_id, BlockDriverState *bs,
1287 BlockDriverState *target, const char *replaces,
1288 int creation_flags, int64_t speed,
1289 uint32_t granularity, int64_t buf_size,
1290 MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1291 bool zero_target,
1292 BlockdevOnError on_source_error,
1293 BlockdevOnError on_target_error,
1294 bool unmap, const char *filter_node_name,
1295 MirrorCopyMode copy_mode, Error **errp);
1298 * backup_job_create:
1299 * @job_id: The id of the newly-created job, or %NULL to use the
1300 * device name of @bs.
1301 * @bs: Block device to operate on.
1302 * @target: Block device to write to.
1303 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
1304 * @sync_mode: What parts of the disk image should be copied to the destination.
1305 * @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental'
1306 * @bitmap_mode: The bitmap synchronization policy to use.
1307 * @perf: Performance options. All actual fields are assumed to be present;
1308 * all ".has_*" fields are ignored.
1309 * @on_source_error: The action to take upon error reading from the source.
1310 * @on_target_error: The action to take upon error writing to the target.
1311 * @creation_flags: Flags that control the behavior of the Job lifetime.
1312 * See @BlockJobCreateFlags
1313 * @cb: Completion function for the job.
1314 * @opaque: Opaque pointer value passed to @cb.
1315 * @txn: Transaction that this job is part of (may be NULL).
1317 * Create a backup operation on @bs. Clusters in @bs are written to @target
1318 * until the job is cancelled or manually completed.
1320 BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
1321 BlockDriverState *target, int64_t speed,
1322 MirrorSyncMode sync_mode,
1323 BdrvDirtyBitmap *sync_bitmap,
1324 BitmapSyncMode bitmap_mode,
1325 bool compress,
1326 const char *filter_node_name,
1327 BackupPerf *perf,
1328 BlockdevOnError on_source_error,
1329 BlockdevOnError on_target_error,
1330 int creation_flags,
1331 BlockCompletionFunc *cb, void *opaque,
1332 JobTxn *txn, Error **errp);
1334 BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
1335 const char *child_name,
1336 const BdrvChildClass *child_class,
1337 BdrvChildRole child_role,
1338 uint64_t perm, uint64_t shared_perm,
1339 void *opaque, Error **errp);
1340 void bdrv_root_unref_child(BdrvChild *child);
1342 void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm,
1343 uint64_t *shared_perm);
1346 * Sets a BdrvChild's permissions. Avoid if the parent is a BDS; use
1347 * bdrv_child_refresh_perms() instead and make the parent's
1348 * .bdrv_child_perm() implementation return the correct values.
1350 int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
1351 Error **errp);
1354 * Calls bs->drv->bdrv_child_perm() and updates the child's permission
1355 * masks with the result.
1356 * Drivers should invoke this function whenever an event occurs that
1357 * makes their .bdrv_child_perm() implementation return different
1358 * values than before, but which will not result in the block layer
1359 * automatically refreshing the permissions.
1361 int bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp);
1363 bool bdrv_recurse_can_replace(BlockDriverState *bs,
1364 BlockDriverState *to_replace);
1367 * Default implementation for BlockDriver.bdrv_child_perm() that can
1368 * be used by block filters and image formats, as long as they use the
1369 * child_of_bds child class and set an appropriate BdrvChildRole.
1371 void bdrv_default_perms(BlockDriverState *bs, BdrvChild *c,
1372 BdrvChildRole role, BlockReopenQueue *reopen_queue,
1373 uint64_t perm, uint64_t shared,
1374 uint64_t *nperm, uint64_t *nshared);
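/*
 * Example (illustrative): a filter or format driver that uses the
 * child_of_bds child class can usually point its permission callback
 * straight at this helper in its BlockDriver definition:
 *
 *     .bdrv_child_perm = bdrv_default_perms,
 */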
1376 const char *bdrv_get_parent_name(const BlockDriverState *bs);
1377 void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
1378 bool blk_dev_has_removable_media(BlockBackend *blk);
1379 bool blk_dev_has_tray(BlockBackend *blk);
1380 void blk_dev_eject_request(BlockBackend *blk, bool force);
1381 bool blk_dev_is_tray_open(BlockBackend *blk);
1382 bool blk_dev_is_medium_locked(BlockBackend *blk);
1384 void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
1386 void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
1387 void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);
1388 bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
1389 const BdrvDirtyBitmap *src,
1390 HBitmap **backup, bool lock);
1392 void bdrv_inc_in_flight(BlockDriverState *bs);
1393 void bdrv_dec_in_flight(BlockDriverState *bs);
1395 void blockdev_close_all_bdrv_states(void);
1397 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
1398 BdrvChild *dst, int64_t dst_offset,
1399 int64_t bytes,
1400 BdrvRequestFlags read_flags,
1401 BdrvRequestFlags write_flags);
1402 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
1403 BdrvChild *dst, int64_t dst_offset,
1404 int64_t bytes,
1405 BdrvRequestFlags read_flags,
1406 BdrvRequestFlags write_flags);
1408 int refresh_total_sectors(BlockDriverState *bs, int64_t hint);
1410 void bdrv_set_monitor_owned(BlockDriverState *bs);
1411 BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp);
1414 * Simple implementation of bdrv_co_create_opts for protocol drivers
1415 * which only support creation via opening a file
1416 * (usually an existing raw storage device).
1418 int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
1419 const char *filename,
1420 QemuOpts *opts,
1421 Error **errp);
1422 extern QemuOptsList bdrv_create_opts_simple;
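/*
 * Example (illustrative): a protocol driver that only supports "creation"
 * by opening an existing device could wire up the helpers above like this
 * in its BlockDriver definition:
 *
 *     .bdrv_co_create_opts = bdrv_co_create_opts_simple,
 *     .create_opts         = &bdrv_create_opts_simple,
 */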
1424 BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
1425 const char *name,
1426 BlockDriverState **pbs,
1427 Error **errp);
1428 BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
1429 BlockDirtyBitmapMergeSourceList *bms,
1430 HBitmap **backup, Error **errp);
1431 BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
1432 bool release,
1433 BlockDriverState **bitmap_bs,
1434 Error **errp);
1436 BdrvChild *bdrv_cow_child(BlockDriverState *bs);
1437 BdrvChild *bdrv_filter_child(BlockDriverState *bs);
1438 BdrvChild *bdrv_filter_or_cow_child(BlockDriverState *bs);
1439 BdrvChild *bdrv_primary_child(BlockDriverState *bs);
1440 BlockDriverState *bdrv_skip_implicit_filters(BlockDriverState *bs);
1441 BlockDriverState *bdrv_skip_filters(BlockDriverState *bs);
1442 BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs);
1444 static inline BlockDriverState *child_bs(BdrvChild *child)
1446 return child ? child->bs : NULL;
1449 static inline BlockDriverState *bdrv_cow_bs(BlockDriverState *bs)
1451 return child_bs(bdrv_cow_child(bs));
1454 static inline BlockDriverState *bdrv_filter_bs(BlockDriverState *bs)
1456 return child_bs(bdrv_filter_child(bs));
1459 static inline BlockDriverState *bdrv_filter_or_cow_bs(BlockDriverState *bs)
1461 return child_bs(bdrv_filter_or_cow_child(bs));
1464 static inline BlockDriverState *bdrv_primary_bs(BlockDriverState *bs)
1466 return child_bs(bdrv_primary_child(bs));
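/*
 * Example usage (illustrative): walking one level down the backing chain
 * without dereferencing a NULL child:
 *
 *     BlockDriverState *backing = bdrv_cow_bs(bs);
 *     if (backing) {
 *         ...
 *     }
 */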
1470 * End all quiescent sections started by bdrv_drain_all_begin(). This is
1471 * needed when deleting a BDS before bdrv_drain_all_end() is called.
1473 * NOTE: this is an internal helper for bdrv_close() *only*. No one else
1474 * should call it.
1476 void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
1479 * Check whether the given offset is in the cached block-status data
1480 * region.
1482 * If it is, and @pnum is not NULL, *pnum is set to
1483 * `bsc.data_end - offset`, i.e. how many bytes, starting from
1484 * @offset, are data (according to the cache).
1485 * Otherwise, *pnum is not touched.
1487 bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum);
1490 * If [offset, offset + bytes) overlaps with the currently cached
1491 * block-status region, invalidate the cache.
1493 * (To be used by I/O paths that cause data regions to be zero or
1494 * holes.)
1496 void bdrv_bsc_invalidate_range(BlockDriverState *bs,
1497 int64_t offset, int64_t bytes);
1500 * Mark the range [offset, offset + bytes) as a data region.
1502 void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes);
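/*
 * Illustrative call pattern (hypothetical protocol driver; map/file handling
 * omitted, data_offset/data_bytes stand for the region the driver discovered):
 * consult the cache in the block-status path, populate it when a data region
 * is found, and invalidate it from zero/discard paths:
 *
 *     if (bdrv_bsc_is_data(bs, offset, pnum)) {
 *         return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
 *     }
 *     ...
 *     bdrv_bsc_fill(bs, data_offset, data_bytes);
 *     ...
 *     bdrv_bsc_invalidate_range(bs, offset, bytes);
 */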
1504 #endif /* BLOCK_INT_H */