block/qcow2-cache.c (qemu-kvm.git)
/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "block_int.h"
#include "qemu-common.h"
#include "qcow2.h"
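
/* A single cache entry: a cluster-sized table buffer, the image offset it
 * belongs to, and bookkeeping for dirty state, hit counting and reference
 * counting. */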
typedef struct Qcow2CachedTable {
    void*   table;
    int64_t offset;
    bool    dirty;
    int     cache_hits;
    int     ref;
} Qcow2CachedTable;
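
/* The cache itself: a fixed-size array of entries plus ordering state.
 * 'depends' names another cache that must be flushed before this one writes
 * anything back; 'depends_on_flush' requires a flush of bs->file first. */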
struct Qcow2Cache {
    Qcow2CachedTable*       entries;
    struct Qcow2Cache*      depends;
    int                     size;
    bool                    depends_on_flush;
    bool                    writethrough;
};
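
/* Create a cache with room for num_tables tables. Each entry gets its own
 * cluster-sized buffer from qemu_blockalign() so it can be written back with
 * a single request. */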
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
    bool writethrough)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c;
    int i;

    c = g_malloc0(sizeof(*c));
    c->size = num_tables;
    c->entries = g_malloc0(sizeof(*c->entries) * num_tables);
    c->writethrough = writethrough;

    for (i = 0; i < c->size; i++) {
        c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}
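
/* Free the cache and all table buffers; every entry must have been released
 * (ref == 0) before this is called. */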
int qcow2_cache_destroy(BlockDriverState* bs, Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        qemu_vfree(c->entries[i].table);
    }

    g_free(c->entries);
    g_free(c);

    return 0;
}
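
/* Write out everything that this cache depends on before any of its own
 * entries may be written back, then clear the dependency. */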
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret;

    ret = qcow2_cache_flush(bs, c->depends);
    if (ret < 0) {
        return ret;
    }

    c->depends = NULL;
    c->depends_on_flush = false;

    return 0;
}
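
/* Write a single dirty entry back to the image file. Any dependency (another
 * cache, or a pending flush of bs->file) is satisfied first so that ordering
 * constraints between metadata updates are preserved. Clean entries are a
 * no-op. */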
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcowState *s = bs->opaque;
    int ret = 0;

    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    if (c->depends) {
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        ret = bdrv_flush(bs->file);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->entries[i].table,
        s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].dirty = false;

    return 0;
}
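
/* Write back all dirty entries and then flush the image file. The loop keeps
 * going after an error so that as much data as possible reaches the disk; a
 * recorded -ENOSPC is not overwritten by later errors. */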
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    int result = 0;
    int ret;
    int i;

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    if (result == 0) {
        ret = bdrv_flush(bs->file);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}
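
/* Make sure that 'dependency' is flushed before any entry of c is written
 * back. Only a single dependency is tracked per cache, so an existing
 * dependency of 'dependency' itself, or a different one already set on c,
 * is flushed right away. */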
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
    Qcow2Cache *dependency)
{
    int ret;

    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}
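
/* Require a flush of the image file before the next write-back from this
 * cache. */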
void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}
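
/* Pick a replacement victim: the unreferenced entry with the fewest cache
 * hits. The hit counters of all scanned entries are halved on every search
 * so that old hits age out over time. */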
static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
{
    int i;
    int min_count = INT_MAX;
    int min_index = -1;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }

        if (c->entries[i].cache_hits < min_count) {
            min_index = i;
            min_count = c->entries[i].cache_hits;
        }

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }
    return min_index;
}
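
/* Common lookup path. Return the cached table for 'offset' or, on a miss,
 * flush and reuse the unreferenced entry with the fewest hits, optionally
 * reading the table from disk. The entry's reference count is incremented;
 * callers must pair this with qcow2_cache_put(). */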
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcowState *s = bs->opaque;
    int i;
    int ret;

    /* Check if the table is already cached */
    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            goto found;
        }
    }

    /* If not, write a table back and replace it */
    i = qcow2_cache_find_entry_to_replace(c);
    if (i < 0) {
        return i;
    }

    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset, c->entries[i].table, s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Give the table some hits for the start so that it won't be replaced
     * immediately. The number 32 is completely arbitrary. */
    c->entries[i].cache_hits = 32;
    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].cache_hits++;
    c->entries[i].ref++;
    *table = c->entries[i].table;
    return 0;
}
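
/* Get the table at 'offset', reading it from the image file on a cache
 * miss. */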
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}
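
/* Get a table buffer for 'offset' without reading from disk, typically for a
 * newly allocated table whose previous on-disk contents do not matter. */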
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}
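
/* Drop a reference to a table. In writethrough mode the entry is written
 * back to the image file immediately. */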
int qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == *table) {
            goto found;
        }
    }
    return -ENOENT;

found:
    c->entries[i].ref--;
    *table = NULL;

    assert(c->entries[i].ref >= 0);

    if (c->writethrough) {
        return qcow2_cache_entry_flush(bs, c, i);
    } else {
        return 0;
    }
}
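
/* Mark the entry owning 'table' as dirty so that the next flush (or
 * qcow2_cache_put() in writethrough mode) writes it back. */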
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == table) {
            goto found;
        }
    }
    abort();

found:
    c->entries[i].dirty = true;
}
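
/* Switch between writeback and writethrough mode. When writethrough is being
 * enabled, all dirty entries are flushed first. Returns the previous mode. */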
bool qcow2_cache_set_writethrough(BlockDriverState *bs, Qcow2Cache *c,
    bool enable)
{
    bool old = c->writethrough;

    if (!old && enable) {
        qcow2_cache_flush(bs, c);
    }

    c->writethrough = enable;
    return old;
}