/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#ifndef BLOCK_BACKEND_IO_H
#define BLOCK_BACKEND_IO_H

#include "block-backend-common.h"
#include "block/accounting.h"

/*
 * I/O API functions. These functions are thread-safe.
 *
 * See include/block/block-io.h for more information about
 * the I/O API.
 */

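/*
 * Example (illustrative sketch, not part of this header's API): submitting
 * an asynchronous read with blk_aio_preadv() and collecting the result in a
 * BlockCompletionFunc.  The "MyRequest" bookkeeping struct and the helper
 * names my_read_done()/my_submit_read() are hypothetical; only
 * blk_aio_preadv() and qemu_iovec_init_buf() are real calls.
 *
 *     typedef struct MyRequest {
 *         QEMUIOVector qiov;
 *         int ret;
 *     } MyRequest;
 *
 *     // Completion callback: ret is 0 on success, -errno on failure.
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         MyRequest *req = opaque;
 *
 *         req->ret = ret;
 *     }
 *
 *     static void my_submit_read(BlockBackend *blk, MyRequest *req,
 *                                void *buf, int64_t offset, int64_t bytes)
 *     {
 *         // Wrap the caller's buffer in a QEMUIOVector and submit the read.
 *         qemu_iovec_init_buf(&req->qiov, buf, bytes);
 *         blk_aio_preadv(blk, offset, &req->qiov, 0, my_read_done, req);
 *     }
 */
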
const char *blk_name(const BlockBackend *blk);

BlockDriverState *blk_bs(BlockBackend *blk);

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
bool blk_iostatus_is_enabled(const BlockBackend *blk);

char *blk_get_attached_dev_id(BlockBackend *blk);

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int64_t bytes, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque);

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
                                unsigned int *nr_zones,
                                BlockZoneDescriptor *zones,
                                BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
                              int64_t offset, int64_t len,
                              BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
                                QEMUIOVector *qiov, BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes,
                             BlockCompletionFunc *cb, void *opaque);
void blk_aio_cancel_async(BlockAIOCB *acb);
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque);

void blk_inc_in_flight(BlockBackend *blk);
void blk_dec_in_flight(BlockBackend *blk);

bool coroutine_fn GRAPH_RDLOCK blk_co_is_inserted(BlockBackend *blk);
bool co_wrapper_mixed_bdrv_rdlock blk_is_inserted(BlockBackend *blk);

bool coroutine_fn GRAPH_RDLOCK blk_co_is_available(BlockBackend *blk);
bool co_wrapper_mixed_bdrv_rdlock blk_is_available(BlockBackend *blk);

void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked);
void co_wrapper blk_lock_medium(BlockBackend *blk, bool locked);

void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag);
void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag);

int64_t coroutine_fn blk_co_getlength(BlockBackend *blk);
int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk);

void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
                                      uint64_t *nb_sectors_ptr);
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);

int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk);
int64_t blk_nb_sectors(BlockBackend *blk);

void *blk_try_blockalign(BlockBackend *blk, size_t size);
void *blk_blockalign(BlockBackend *blk, size_t size);
bool blk_is_writable(BlockBackend *blk);
bool blk_enable_write_cache(BlockBackend *blk);
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error);
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error);
void blk_iostatus_set_err(BlockBackend *blk, int error);
int blk_get_max_iov(BlockBackend *blk);
int blk_get_max_hw_iov(BlockBackend *blk);

AioContext *blk_get_aio_context(BlockBackend *blk);
BlockAcctStats *blk_get_stats(BlockBackend *blk);
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret);

uint32_t blk_get_request_alignment(BlockBackend *blk);
uint32_t blk_get_max_transfer(BlockBackend *blk);
uint64_t blk_get_max_hw_transfer(BlockBackend *blk);

int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int64_t bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags);

int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
                                           BlockDriverState *base,
                                           int64_t offset, int64_t bytes,
                                           int64_t *pnum, int64_t *map,
                                           BlockDriverState **file);
int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
                                           BlockDriverState *base,
                                           bool include_base, int64_t offset,
                                           int64_t bytes, int64_t *pnum);

131 * "I/O or GS" API functions. These functions can run without
132 * the BQL, but only in one specific iothread/main loop.
134 * See include/block/block-io.h for more information about
135 * the "I/O or GS" API.
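/*
 * Example (illustrative sketch, not part of this header's API): the
 * functions below come in pairs, a coroutine_fn variant (blk_co_*) that
 * must run in coroutine context and a co_wrapper_mixed variant that can
 * also be called outside of it.  The helper name my_co_copy_block() and the
 * fixed 4 KiB buffer are hypothetical; blk_co_pread()/blk_co_pwrite() are
 * the real calls being demonstrated.
 *
 *     static int coroutine_fn my_co_copy_block(BlockBackend *blk,
 *                                              int64_t src, int64_t dst)
 *     {
 *         char buf[4096];
 *         int ret;
 *
 *         // Both calls return 0 on success and -errno on failure.
 *         ret = blk_co_pread(blk, src, sizeof(buf), buf, 0);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         return blk_co_pwrite(blk, dst, sizeof(buf), buf, 0);
 *     }
 */
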
int co_wrapper_mixed blk_pread(BlockBackend *blk, int64_t offset,
                               int64_t bytes, void *buf,
                               BdrvRequestFlags flags);
int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
                              void *buf, BdrvRequestFlags flags);

int co_wrapper_mixed blk_preadv(BlockBackend *blk, int64_t offset,
                                int64_t bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags);
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags);

int co_wrapper_mixed blk_preadv_part(BlockBackend *blk, int64_t offset,
                                     int64_t bytes, QEMUIOVector *qiov,
                                     size_t qiov_offset,
                                     BdrvRequestFlags flags);
int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
                                    int64_t bytes, QEMUIOVector *qiov,
                                    size_t qiov_offset,
                                    BdrvRequestFlags flags);

int co_wrapper_mixed blk_pwrite(BlockBackend *blk, int64_t offset,
                                int64_t bytes, const void *buf,
                                BdrvRequestFlags flags);
int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
                               const void *buf, BdrvRequestFlags flags);

int co_wrapper_mixed blk_pwritev(BlockBackend *blk, int64_t offset,
                                 int64_t bytes, QEMUIOVector *qiov,
                                 BdrvRequestFlags flags);
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                int64_t bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags);

int co_wrapper_mixed blk_pwritev_part(BlockBackend *blk, int64_t offset,
                                      int64_t bytes, QEMUIOVector *qiov,
                                      size_t qiov_offset,
                                      BdrvRequestFlags flags);
int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
                                     int64_t bytes,
                                     QEMUIOVector *qiov, size_t qiov_offset,
                                     BdrvRequestFlags flags);

int co_wrapper_mixed blk_pwrite_compressed(BlockBackend *blk,
                                           int64_t offset, int64_t bytes,
                                           const void *buf);
int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
                                          int64_t bytes, const void *buf);

int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                       int64_t bytes,
                                       BdrvRequestFlags flags);
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int64_t bytes, BdrvRequestFlags flags);

int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
                                    unsigned int *nr_zones,
                                    BlockZoneDescriptor *zones);
int co_wrapper_mixed blk_zone_report(BlockBackend *blk, int64_t offset,
                                     unsigned int *nr_zones,
                                     BlockZoneDescriptor *zones);
int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
                                  int64_t offset, int64_t len);
int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
                                   int64_t offset, int64_t len);
int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
                                    QEMUIOVector *qiov,
                                    BdrvRequestFlags flags);
int co_wrapper_mixed blk_zone_append(BlockBackend *blk, int64_t *offset,
                                     QEMUIOVector *qiov,
                                     BdrvRequestFlags flags);

int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset,
                                  int64_t bytes);
int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
                                 int64_t bytes);

int co_wrapper_mixed blk_flush(BlockBackend *blk);
int coroutine_fn blk_co_flush(BlockBackend *blk);

int co_wrapper_mixed blk_ioctl(BlockBackend *blk, unsigned long int req,
                               void *buf);
int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
                              void *buf);

int co_wrapper_mixed blk_truncate(BlockBackend *blk, int64_t offset,
                                  bool exact, PreallocMode prealloc,
                                  BdrvRequestFlags flags, Error **errp);
int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
                                 PreallocMode prealloc, BdrvRequestFlags flags,
                                 Error **errp);

#endif /* BLOCK_BACKEND_IO_H */