block/qcow2-cache.c
/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "block/block_int.h"
#include "qemu-common.h"
#include "qcow2.h"
#include "trace.h"
typedef struct Qcow2CachedTable {
    void*   table;
    int64_t offset;
    bool    dirty;
    int     cache_hits;
    int     ref;
} Qcow2CachedTable;
struct Qcow2Cache {
    Qcow2CachedTable*       entries;
    struct Qcow2Cache*      depends;
    int                     size;
    bool                    depends_on_flush;
};
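
/*
 * Typical calling pattern (a sketch only; l2_offset, l2_index and
 * cluster_offset are illustrative names, not defined in this file):
 *
 *     uint64_t *l2_table;
 *     int ret;
 *
 *     ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
 *                           (void **) &l2_table);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 *     l2_table[l2_index] = cpu_to_be64(cluster_offset);
 *     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
 *
 *     ret = qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
 *     if (ret < 0) {
 *         return ret;
 *     }
 */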
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c;
    int i;

    c = g_malloc0(sizeof(*c));
    c->size = num_tables;
    c->entries = g_malloc0(sizeof(*c->entries) * num_tables);

    for (i = 0; i < c->size; i++) {
        c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}
int qcow2_cache_destroy(BlockDriverState* bs, Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        qemu_vfree(c->entries[i].table);
    }

    g_free(c->entries);
    g_free(c);

    return 0;
}
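
/* A cache may be marked as depending on another cache: before any dirty
 * entry of the dependent cache is written back, the cache it depends on is
 * flushed first and the dependency is cleared. qcow2 uses this to order
 * metadata updates on disk, e.g. between the refcount block and L2 table
 * caches. */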
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret;

    ret = qcow2_cache_flush(bs, c->depends);
    if (ret < 0) {
        return ret;
    }

    c->depends = NULL;
    c->depends_on_flush = false;

    return 0;
}
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcowState *s = bs->opaque;
    int ret = 0;

    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
                                  c == s->l2_table_cache, i);

    if (c->depends) {
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        ret = bdrv_flush(bs->file);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_BLOCK,
                c->entries[i].offset, s->cluster_size);
    } else if (c == s->l2_table_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                c->entries[i].offset, s->cluster_size);
    } else {
        ret = qcow2_pre_write_overlap_check(bs, 0,
                c->entries[i].offset, s->cluster_size);
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->entries[i].table,
        s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].dirty = false;

    return 0;
}
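
/* Write back all dirty entries of the cache and then flush the underlying
 * file. An error does not stop the loop; once -ENOSPC has been recorded it is
 * kept as the final result even if later entries fail with a different
 * error. */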
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    BDRVQcowState *s = bs->opaque;
    int result = 0;
    int ret;
    int i;

    trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    if (result == 0) {
        ret = bdrv_flush(bs->file);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
    Qcow2Cache *dependency)
{
    int ret;

    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}
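
/* Mark the cache so that the next write-back of one of its dirty entries is
 * preceded by a flush of bs->file (see qcow2_cache_entry_flush()). */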
void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}
int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret, i;

    ret = qcow2_cache_flush(bs, c);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        c->entries[i].offset = 0;
        c->entries[i].cache_hits = 0;
    }

    return 0;
}
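
/* Replacement policy: pick the unreferenced entry with the fewest cache hits.
 * The hit counters of the unreferenced entries scanned on the way are halved,
 * so that hits from the distant past gradually age out. */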
static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
{
    int i;
    int min_count = INT_MAX;
    int min_index = -1;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }

        if (c->entries[i].cache_hits < min_count) {
            min_index = i;
            min_count = c->entries[i].cache_hits;
        }

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }
    return min_index;
}
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcowState *s = bs->opaque;
    int i;
    int ret;

    trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
                          offset, read_from_disk);

    /* Check if the table is already cached */
    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            goto found;
        }
    }

    /* If not, write a table back and replace it */
    i = qcow2_cache_find_entry_to_replace(c);
    trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
                                        c == s->l2_table_cache, i);
    if (i < 0) {
        return i;
    }

    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    trace_qcow2_cache_get_read(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);
    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset, c->entries[i].table, s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Give the table some hits for the start so that it won't be replaced
     * immediately. The number 32 is completely arbitrary. */
    c->entries[i].cache_hits = 32;
    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].cache_hits++;
    c->entries[i].ref++;
    *table = c->entries[i].table;

    trace_qcow2_cache_get_done(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);

    return 0;
}
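
/* qcow2_cache_get() returns a table that is read from disk if it is not
 * cached yet; qcow2_cache_get_empty() only reserves a cache entry for a
 * table that the caller is going to initialize from scratch. */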
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}

int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}
int qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == *table) {
            goto found;
        }
    }
    return -ENOENT;

found:
    c->entries[i].ref--;
    *table = NULL;

    assert(c->entries[i].ref >= 0);
    return 0;
}
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == table) {
            goto found;
        }
    }
    abort();

found:
    c->entries[i].dirty = true;
}