/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "trace.h"
#include "qemu_socket.h" /* for EINPROGRESS on Windows */
#include "qed.h"
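/*
 * The callback-state struct for table reads is not shown in this excerpt.
 * The definition below is a sketch reconstructed from how read_table_cb is
 * used in qed_read_table() and qed_read_table_cb(); the field order and the
 * GenericCB/QEMUIOVector types (from the QED and block headers) are assumed.
 */
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEDTable *table;

    struct iovec iov;
    QEMUIOVector qiov;
} QEDReadTableCB;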
static void qed_read_table_cb(void *opaque, int ret)
{
    QEDReadTableCB *read_table_cb = opaque;
    QEDTable *table = read_table_cb->table;
    int noffsets = read_table_cb->iov.iov_len / sizeof(uint64_t);
    int i;

    /* Handle I/O error */
    if (ret) {
        goto out;
    }

    /* Byteswap offsets */
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

out:
    /* Completion */
    trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
    gencb_complete(&read_table_cb->gencb, ret);
}
static void qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           BlockDriverCompletionFunc *cb, void *opaque)
{
    QEDReadTableCB *read_table_cb = gencb_alloc(sizeof(*read_table_cb),
                                                cb, opaque);
    QEMUIOVector *qiov = &read_table_cb->qiov;
    BlockDriverAIOCB *aiocb;

    trace_qed_read_table(s, offset, table);

    read_table_cb->s = s;
    read_table_cb->table = table;
    read_table_cb->iov.iov_base = table->offsets;
    read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size;

    qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);
    aiocb = bdrv_aio_readv(s->bs->file, offset / BDRV_SECTOR_SIZE, qiov,
                           read_table_cb->iov.iov_len / BDRV_SECTOR_SIZE,
                           qed_read_table_cb, read_table_cb);
    if (!aiocb) {
        qed_read_table_cb(read_table_cb, -EIO);
    }
}
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEDTable *orig_table;
    QEDTable *table;
    bool flush;             /* flush after write? */

    struct iovec iov;
    QEMUIOVector qiov;
} QEDWriteTableCB;
static void qed_write_table_cb(void *opaque, int ret)
{
    QEDWriteTableCB *write_table_cb = opaque;

    trace_qed_write_table_cb(write_table_cb->s,
                             write_table_cb->orig_table,
                             write_table_cb->flush,
                             ret);

    if (ret) {
        goto out;
    }

    if (write_table_cb->flush) {
        /* We still need to flush first */
        write_table_cb->flush = false;
        bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
                       write_table_cb);
        return;
    }

out:
    qemu_vfree(write_table_cb->table);
    gencb_complete(&write_table_cb->gencb, ret);
}
/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 * @cb:         Completion function
 * @opaque:     Argument for completion function
 */
static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                            unsigned int index, unsigned int n, bool flush,
                            BlockDriverCompletionFunc *cb, void *opaque)
{
    QEDWriteTableCB *write_table_cb;
    BlockDriverAIOCB *aiocb;
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    size_t len_bytes;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);
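    /*
     * As an illustration of the rounding above: with 512-byte sectors and
     * 8-byte offsets, sector_mask is 63.  For example, index 100 and n 10
     * give start 64 and end 128, so one whole 512-byte sector (64 offsets)
     * is rewritten.
     */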
    write_table_cb = gencb_alloc(sizeof(*write_table_cb), cb, opaque);
    write_table_cb->s = s;
    write_table_cb->orig_table = table;
    write_table_cb->flush = flush;
    write_table_cb->table = qemu_blockalign(s->bs, len_bytes);
    write_table_cb->iov.iov_base = write_table_cb->table->offsets;
    write_table_cb->iov.iov_len = len_bytes;
    qemu_iovec_init_external(&write_table_cb->qiov, &write_table_cb->iov, 1);

    /* Byteswap table */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        write_table_cb->table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    aiocb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                            &write_table_cb->qiov,
                            write_table_cb->iov.iov_len / BDRV_SECTOR_SIZE,
                            qed_write_table_cb, write_table_cb);
    if (!aiocb) {
        qed_write_table_cb(write_table_cb, -EIO);
    }
}
/**
 * Propagate return value from async callback
 */
static void qed_sync_cb(void *opaque, int ret)
{
    *(int *)opaque = ret;
}
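/*
 * The *_sync() helpers below wrap the asynchronous table I/O in a blocking
 * call: they issue the request with qed_sync_cb() and a stack variable
 * preloaded with -EINPROGRESS, then pump the AIO event loop until the
 * callback overwrites it with the final return value.
 */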
int qed_read_l1_table_sync(BDRVQEDState *s)
{
    int ret = -EINPROGRESS;

    async_context_push();

    qed_read_table(s, s->header.l1_table_offset,
                   s->l1_table, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        qemu_aio_wait();
    }

    async_context_pop();

    return ret;
}
void qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n,
                        BlockDriverCompletionFunc *cb, void *opaque)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    qed_write_table(s, s->header.l1_table_offset,
                    s->l1_table, index, n, false, cb, opaque);
}
int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    int ret = -EINPROGRESS;

    async_context_push();

    qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        qemu_aio_wait();
    }

    async_context_pop();

    return ret;
}
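/*
 * Callback state for L2 table reads. This definition is a sketch: the fields
 * are assumed from how read_l2_table_cb is used in qed_read_l2_table() and
 * qed_read_l2_table_cb() below, so the exact original layout may differ.
 */
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    uint64_t l2_offset;
    QEDRequest *request;
} QEDReadL2TableCB;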
static void qed_read_l2_table_cb(void *opaque, int ret)
{
    QEDReadL2TableCB *read_l2_table_cb = opaque;
    QEDRequest *request = read_l2_table_cb->request;
    BDRVQEDState *s = read_l2_table_cb->s;
    CachedL2Table *l2_table = request->l2_table;

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(l2_table);
        request->l2_table = NULL;
    } else {
        l2_table->offset = read_l2_table_cb->l2_offset;

        qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache,
                                                    l2_table->offset);
        assert(request->l2_table != NULL);
    }

    gencb_complete(&read_l2_table_cb->gencb, ret);
}
void qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset,
                       BlockDriverCompletionFunc *cb, void *opaque)
{
    QEDReadL2TableCB *read_l2_table_cb;

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        cb(opaque, 0);
        return;
    }

    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    read_l2_table_cb = gencb_alloc(sizeof(*read_l2_table_cb), cb, opaque);
    read_l2_table_cb->s = s;
    read_l2_table_cb->l2_offset = offset;
    read_l2_table_cb->request = request;

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    qed_read_table(s, offset, request->l2_table->table,
                   qed_read_l2_table_cb, read_l2_table_cb);
}
int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret = -EINPROGRESS;

    async_context_push();

    qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        qemu_aio_wait();
    }

    async_context_pop();

    return ret;
}
void qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                        unsigned int index, unsigned int n, bool flush,
                        BlockDriverCompletionFunc *cb, void *opaque)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    qed_write_table(s, request->l2_table->offset,
                    request->l2_table->table, index, n, flush, cb, opaque);
}
int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    int ret = -EINPROGRESS;

    async_context_push();

    qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        qemu_aio_wait();
    }

    async_context_pop();

    return ret;
}