/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"
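
/*
 * Asynchronous operations in this file follow a common pattern: each request
 * allocates a callback struct with gencb_alloc(), which embeds a GenericCB
 * carrying the caller's completion function and opaque pointer, and finishes
 * with gencb_complete(), which invokes that completion function and frees
 * the struct.
 */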
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEDTable *table;

    struct iovec iov;
    QEMUIOVector qiov;
} QEDReadTableCB;

static void qed_read_table_cb(void *opaque, int ret)
{
    QEDReadTableCB *read_table_cb = opaque;
    QEDTable *table = read_table_cb->table;
    int noffsets = read_table_cb->qiov.size / sizeof(uint64_t);
    int i;

    /* Handle I/O error */
    if (ret) {
        goto out;
    }

    /* Byteswap offsets from on-disk little-endian to host byte order */
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

out:
    /* Completion */
    trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
    gencb_complete(&read_table_cb->gencb, ret);
}
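
/*
 * Read a complete table from the image file.  The whole table is fetched in
 * a single request: the iovec below covers cluster_size * table_size bytes.
 * As a rough illustration (assuming the QED defaults of 64 KiB clusters and
 * a table_size of 4 clusters), that is a 256 KiB read holding 32768
 * little-endian offsets, which qed_read_table_cb() then byteswaps in place.
 */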
static void qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           BlockCompletionFunc *cb, void *opaque)
{
    QEDReadTableCB *read_table_cb = gencb_alloc(sizeof(*read_table_cb),
                                                cb, opaque);
    QEMUIOVector *qiov = &read_table_cb->qiov;

    trace_qed_read_table(s, offset, table);

    read_table_cb->s = s;
    read_table_cb->table = table;
    read_table_cb->iov.iov_base = table->offsets;
    read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size;

    qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);
    bdrv_aio_readv(s->bs->file, offset / BDRV_SECTOR_SIZE, qiov,
                   qiov->size / BDRV_SECTOR_SIZE,
                   qed_read_table_cb, read_table_cb);
}
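
/*
 * The write path stages the table update in a separate bounce buffer
 * (write_table_cb->table below) so that the offsets can be converted to
 * little-endian without disturbing the in-memory table.  When a flush is
 * requested, qed_write_table_cb() issues bdrv_aio_flush() after the write
 * completes and re-enters itself with the flush flag cleared before
 * signalling completion.
 */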
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEDTable *orig_table;
    QEDTable *table;
    bool flush;                     /* flush after write? */

    struct iovec iov;
    QEMUIOVector qiov;
} QEDWriteTableCB;

static void qed_write_table_cb(void *opaque, int ret)
{
    QEDWriteTableCB *write_table_cb = opaque;

    trace_qed_write_table_cb(write_table_cb->s,
                             write_table_cb->orig_table,
                             write_table_cb->flush,
                             ret);

    if (ret) {
        goto out;
    }

    if (write_table_cb->flush) {
        /* We still need to flush first */
        write_table_cb->flush = false;
        bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
                       write_table_cb);
        return;
    }

out:
    qemu_vfree(write_table_cb->table);
    gencb_complete(&write_table_cb->gencb, ret);
}
/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 * @cb:         Completion function
 * @opaque:     Argument for completion function
 */
static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                            unsigned int index, unsigned int n, bool flush,
                            BlockCompletionFunc *cb, void *opaque)
{
    QEDWriteTableCB *write_table_cb;
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    size_t len_bytes;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

    write_table_cb = gencb_alloc(sizeof(*write_table_cb), cb, opaque);
    write_table_cb->s = s;
    write_table_cb->orig_table = table;
    write_table_cb->flush = flush;
    write_table_cb->table = qemu_blockalign(s->bs, len_bytes);
    write_table_cb->iov.iov_base = write_table_cb->table->offsets;
    write_table_cb->iov.iov_len = len_bytes;
    qemu_iovec_init_external(&write_table_cb->qiov, &write_table_cb->iov, 1);

    /* Byteswap table elements into the bounce buffer */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        write_table_cb->table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &write_table_cb->qiov,
                    write_table_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_write_table_cb, write_table_cb);
}
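
/*
 * Example of the sector alignment above, assuming 512-byte sectors and
 * 8-byte table entries (sector_mask == 63): updating index = 100, n = 10
 * gives start = 64 and end = 128, so len_bytes = 512 and the write begins
 * 512 bytes into the table.  Partial updates are thus always widened to
 * whole sectors so the image file is written in sector-sized units.
 */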
/**
 * Propagate return value from async callback
 */
static void qed_sync_cb(void *opaque, int ret)
{
    *(int *)opaque = ret;
}
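
/*
 * The *_sync() helpers below emulate synchronous table I/O on top of the
 * asynchronous primitives: ret starts out as the sentinel -EINPROGRESS,
 * qed_sync_cb() overwrites it with the real return value on completion, and
 * aio_poll() is run in a blocking loop until that happens.
 */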
int qed_read_l1_table_sync(BDRVQEDState *s)
{
    int ret = -EINPROGRESS;

    qed_read_table(s, s->header.l1_table_offset,
                   s->l1_table, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}
void qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n,
                        BlockCompletionFunc *cb, void *opaque)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    qed_write_table(s, s->header.l1_table_offset,
                    s->l1_table, index, n, false, cb, opaque);
}
int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    int ret = -EINPROGRESS;

    qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}
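
/*
 * L2 tables are kept in an in-memory cache (s->l2_cache).  After a
 * successful read, the freshly loaded table is committed to the cache and
 * the request then looks the entry up again and takes a reference to the
 * cached copy; if the load fails, the provisional entry is dropped so a
 * partially filled table is never used.
 */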
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    uint64_t l2_offset;
    QEDRequest *request;
} QEDReadL2TableCB;

static void qed_read_l2_table_cb(void *opaque, int ret)
{
    QEDReadL2TableCB *read_l2_table_cb = opaque;
    QEDRequest *request = read_l2_table_cb->request;
    BDRVQEDState *s = read_l2_table_cb->s;
    CachedL2Table *l2_table = request->l2_table;
    uint64_t l2_offset = read_l2_table_cb->l2_offset;

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(l2_table);
        request->l2_table = NULL;
    } else {
        l2_table->offset = l2_offset;

        qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
        assert(request->l2_table != NULL);
    }

    gencb_complete(&read_l2_table_cb->gencb, ret);
}
void qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset,
                       BlockCompletionFunc *cb, void *opaque)
{
    QEDReadL2TableCB *read_l2_table_cb;

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        cb(opaque, 0);
        return;
    }

    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    read_l2_table_cb = gencb_alloc(sizeof(*read_l2_table_cb), cb, opaque);
    read_l2_table_cb->s = s;
    read_l2_table_cb->l2_offset = offset;
    read_l2_table_cb->request = request;

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    qed_read_table(s, offset, request->l2_table->table,
                   qed_read_l2_table_cb, read_l2_table_cb);
}
int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret = -EINPROGRESS;

    qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}
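
/*
 * Write back (part of) the currently referenced L2 table.  The table data
 * and its file offset come from request->l2_table, so callers are expected
 * to have loaded an L2 table via qed_read_l2_table() (or the sync variant)
 * beforehand.
 */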
void qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                        unsigned int index, unsigned int n, bool flush,
                        BlockCompletionFunc *cb, void *opaque)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    qed_write_table(s, request->l2_table->offset,
                    request->l2_table->table, index, n, flush, cb, opaque);
}
int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    int ret = -EINPROGRESS;

    qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}