block/qed-table.c

/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"
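
/*
 * QED metadata is a two-level structure: an L1 table whose entries point
 * to L2 tables, whose entries in turn point to data clusters.  On disk,
 * each table is an array of little-endian 64-bit file offsets spanning
 * header.table_size clusters.  The helpers below load and store whole
 * tables or sector-aligned slices of them, byteswapping between the
 * on-disk little-endian format and host CPU byte order.
 */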

/* Called either from qed_check or with table_lock held. */
static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
{
    QEMUIOVector qiov;
    int noffsets;
    int i, ret;

    struct iovec iov = {
        .iov_base = table->offsets,
        .iov_len = s->header.cluster_size * s->header.table_size,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    trace_qed_read_table(s, offset, table);
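
    /*
     * The read may block or yield.  In coroutine context, drop table_lock
     * across the I/O so other coroutines can use the table cache in the
     * meantime; qed_check calls this function outside coroutine context,
     * where no lock is held.
     */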
    if (qemu_in_coroutine()) {
        qemu_co_mutex_unlock(&s->table_lock);
    }
    ret = bdrv_preadv(s->bs->file, offset, &qiov);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_lock(&s->table_lock);
    }
    if (ret < 0) {
        goto out;
    }

    /* Byteswap offsets */
    noffsets = qiov.size / sizeof(uint64_t);
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

    ret = 0;
out:
    /* Completion */
    trace_qed_read_table_cb(s, table, ret);
    return ret;
}

/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 *
 * Called either from qed_check or with table_lock held.
 */
static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           unsigned int index, unsigned int n, bool flush)
{
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    QEDTable *new_table;
    struct iovec iov;
    QEMUIOVector qiov;
    size_t len_bytes;
    int ret;

    trace_qed_write_table(s, offset, table, index, n);
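
    /*
     * The write is rounded out to sector granularity.  Each 512-byte
     * sector holds BDRV_SECTOR_SIZE / sizeof(uint64_t) = 64 offsets, so
     * sector_mask is 63.  For example, index = 100 and n = 2 round to
     * start = 64 and end = 128, i.e. one full sector is rewritten.
     */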
    /* Calculate indices of the first and one after last elements */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

    new_table = qemu_blockalign(s->bs, len_bytes);
    iov = (struct iovec) {
        .iov_base = new_table->offsets,
        .iov_len = len_bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);
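
    /*
     * Byteswap into the scratch buffer rather than in place: the
     * in-memory table must stay in CPU byte order for concurrent readers
     * while the little-endian copy is written out.
     */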
    /* Byteswap table */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        new_table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    if (qemu_in_coroutine()) {
        qemu_co_mutex_unlock(&s->table_lock);
    }
    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_lock(&s->table_lock);
    }
    trace_qed_write_table_cb(s, table, flush, ret);
    if (ret < 0) {
        goto out;
    }
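
    /*
     * Callers pass flush=true when the update must reach stable storage
     * before dependent metadata is written.
     */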
    if (flush) {
        ret = bdrv_flush(s->bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    qemu_vfree(new_table);
    return ret;
}

int qed_read_l1_table_sync(BDRVQEDState *s)
{
    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
}

/* Called either from qed_check or with table_lock held. */
int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    return qed_write_table(s, s->header.l1_table_offset,
                           s->l1_table, index, n, false);
}

int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    return qed_write_l1_table(s, index, n);
}

/* Called either from qed_check or with table_lock held. */
int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret;
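
    /*
     * On return, request->l2_table either holds a referenced L2 cache
     * entry (ret == 0) or is NULL (ret < 0); the reference is dropped
     * with qed_unref_l2_cache_entry() when the request completes.
     */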

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        return 0;
    }
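
    /*
     * Cache miss: allocate a fresh entry and populate it from the image
     * file.  qed_find_l2_cache_entry() returns entries with a reference
     * already held, so the hit path above needed no extra work.
     */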
    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    ret = qed_read_table(s, offset, request->l2_table->table);

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
    } else {
        request->l2_table->offset = offset;

        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
        assert(request->l2_table != NULL);
    }

    return ret;
}

int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                           uint64_t offset)
{
    return qed_read_l2_table(s, request, offset);
}

/* Called either from qed_check or with table_lock held. */
int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                       unsigned int index, unsigned int n, bool flush)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    return qed_write_table(s, request->l2_table->offset,
                           request->l2_table->table, index, n, flush);
}

int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    return qed_write_l2_table(s, request, index, n, flush);
}