/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEDTable *table;

    struct iovec iov;
    QEMUIOVector qiov;
} QEDReadTableCB;

static void qed_read_table_cb(void *opaque, int ret)
{
    QEDReadTableCB *read_table_cb = opaque;
    QEDTable *table = read_table_cb->table;
    int noffsets = read_table_cb->qiov.size / sizeof(uint64_t);
    int i;

    /* Handle I/O error */
    if (ret) {
        goto out;
    }

    /* Byteswap offsets from the little-endian on-disk format to host order */
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

out:
    /* Completion */
    trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
    gencb_complete(&read_table_cb->gencb, ret);
}

static void qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           BlockCompletionFunc *cb, void *opaque)
{
    QEDReadTableCB *read_table_cb = gencb_alloc(sizeof(*read_table_cb),
                                                cb, opaque);
    QEMUIOVector *qiov = &read_table_cb->qiov;

    trace_qed_read_table(s, offset, table);

    read_table_cb->s = s;
    read_table_cb->table = table;
    read_table_cb->iov.iov_base = table->offsets;
    read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size;

    qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);
    bdrv_aio_readv(s->bs->file->bs, offset / BDRV_SECTOR_SIZE, qiov,
                   qiov->size / BDRV_SECTOR_SIZE,
                   qed_read_table_cb, read_table_cb);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEDTable *orig_table;
    QEDTable *table;
    bool flush;                         /* flush after write? */

    struct iovec iov;
    QEMUIOVector qiov;
} QEDWriteTableCB;

static void qed_write_table_cb(void *opaque, int ret)
{
    QEDWriteTableCB *write_table_cb = opaque;

    trace_qed_write_table_cb(write_table_cb->s,
                             write_table_cb->orig_table,
                             write_table_cb->flush,
                             ret);

    if (ret) {
        goto out;
    }

    if (write_table_cb->flush) {
        /* We still need to flush first */
        write_table_cb->flush = false;
        bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
                       write_table_cb);
        return;
    }

out:
    qemu_vfree(write_table_cb->table);
    gencb_complete(&write_table_cb->gencb, ret);
}
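
/*
 * Note: when a flush is requested, qed_write_table_cb() runs twice.  The
 * first invocation (table write completed) clears ->flush and issues
 * bdrv_aio_flush() with itself as the completion; the second invocation
 * (flush completed) falls through, frees the bounce buffer and completes
 * the original request.
 */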

/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 * @cb:         Completion function
 * @opaque:     Argument for completion function
 */
static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                            unsigned int index, unsigned int n, bool flush,
                            BlockCompletionFunc *cb, void *opaque)
{
    QEDWriteTableCB *write_table_cb;
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    size_t len_bytes;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);
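
    /* For example, assuming 512-byte sectors (BDRV_SECTOR_SIZE) and 8-byte
     * table entries, sector_mask is 63.  An update with index = 70, n = 2
     * rounds down to start = 64 and up to end = 128, so len_bytes = 512 and
     * a single aligned sector is rewritten.
     */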

    write_table_cb = gencb_alloc(sizeof(*write_table_cb), cb, opaque);
    write_table_cb->s = s;
    write_table_cb->orig_table = table;
    write_table_cb->flush = flush;
    write_table_cb->table = qemu_blockalign(s->bs, len_bytes);
    write_table_cb->iov.iov_base = write_table_cb->table->offsets;
    write_table_cb->iov.iov_len = len_bytes;
    qemu_iovec_init_external(&write_table_cb->qiov, &write_table_cb->iov, 1);

    /* Byteswap table entries into the little-endian on-disk format */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        write_table_cb->table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
                    &write_table_cb->qiov,
                    write_table_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_write_table_cb, write_table_cb);
}

/**
 * Propagate return value from async callback
 */
static void qed_sync_cb(void *opaque, int ret)
{
    *(int *)opaque = ret;
}
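
/*
 * The *_table_sync() helpers below all follow the same pattern (a sketch,
 * assuming the caller runs in the AioContext of s->bs):
 *
 *     int ret = -EINPROGRESS;                       // sentinel value
 *     qed_read_table(s, offset, table, qed_sync_cb, &ret);
 *     while (ret == -EINPROGRESS) {
 *         aio_poll(bdrv_get_aio_context(s->bs), true);  // drive completion
 *     }
 *     // ret now holds the value passed to the completion callback
 */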

int qed_read_l1_table_sync(BDRVQEDState *s)
{
    int ret = -EINPROGRESS;

    qed_read_table(s, s->header.l1_table_offset,
                   s->l1_table, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}

void qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n,
                        BlockCompletionFunc *cb, void *opaque)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    qed_write_table(s, s->header.l1_table_offset,
                    s->l1_table, index, n, false, cb, opaque);
}

int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    int ret = -EINPROGRESS;

    qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    uint64_t l2_offset;
    QEDRequest *request;
} QEDReadL2TableCB;

static void qed_read_l2_table_cb(void *opaque, int ret)
{
    QEDReadL2TableCB *read_l2_table_cb = opaque;
    QEDRequest *request = read_l2_table_cb->request;
    BDRVQEDState *s = read_l2_table_cb->s;
    CachedL2Table *l2_table = request->l2_table;
    uint64_t l2_offset = read_l2_table_cb->l2_offset;

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(l2_table);
        request->l2_table = NULL;
    } else {
        l2_table->offset = l2_offset;

        qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
        assert(request->l2_table != NULL);
    }

    gencb_complete(&read_l2_table_cb->gencb, ret);
}

void qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset,
                       BlockCompletionFunc *cb, void *opaque)
{
    QEDReadL2TableCB *read_l2_table_cb;

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        cb(opaque, 0);
        return;
    }

    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    read_l2_table_cb = gencb_alloc(sizeof(*read_l2_table_cb), cb, opaque);
    read_l2_table_cb->s = s;
    read_l2_table_cb->l2_offset = offset;
    read_l2_table_cb->request = request;

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    qed_read_table(s, offset, request->l2_table->table,
                   qed_read_l2_table_cb, read_l2_table_cb);
}
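
/*
 * Roughly, the L2 read path above works as follows: the request drops its
 * old L2 reference, probes the cache and completes immediately on a hit;
 * on a miss it allocates a fresh cache entry and table, reads the table
 * from the image file, and qed_read_l2_table_cb() then commits the entry
 * to the cache and re-acquires it via qed_find_l2_cache_entry() so that
 * request->l2_table points at the cached entry on success.
 */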

int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret = -EINPROGRESS;

    qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}

void qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                        unsigned int index, unsigned int n, bool flush,
                        BlockCompletionFunc *cb, void *opaque)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    qed_write_table(s, request->l2_table->offset,
                    request->l2_table->table, index, n, flush, cb, opaque);
}

int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    int ret = -EINPROGRESS;

    qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}