slirp: fix segv when init failed
[qemu.git] / block / qcow2-cache.c
blob6eaefeddc4015e974c0d7c3e806a0717b5741721
1 /*
2 * L2/refcount table cache for the QCOW2 format
4 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 /* Needed for CONFIG_MADVISE */
26 #include "qemu/osdep.h"
27 #include "block/block_int.h"
28 #include "qemu-common.h"
29 #include "qcow2.h"
30 #include "trace.h"
/*
 * Per-slot metadata for one cached table. The table payload itself lives at
 * the corresponding slot inside Qcow2Cache.table_array.
 */
typedef struct Qcow2CachedTable {
    int64_t  offset;        /* disk offset of the cached table; 0 = slot unused */
    uint64_t lru_counter;   /* Qcow2Cache.lru_counter value at last release */
    int      ref;           /* number of outstanding qcow2_cache_get() refs */
    bool     dirty;         /* true if the table must be written back */
} Qcow2CachedTable;
/* A write-back cache of qcow2 metadata tables (L2 or refcount blocks) */
struct Qcow2Cache {
    Qcow2CachedTable *entries;      /* one descriptor per cached table */
    struct Qcow2Cache *depends;     /* cache that must be flushed before ours */
    int size;                       /* number of table slots */
    bool depends_on_flush;          /* flush bs->file before writing entries */
    void *table_array;              /* size * cluster_size bytes of table data */
    uint64_t lru_counter;           /* monotonic counter for LRU ordering */
    uint64_t cache_clean_lru_counter; /* lru_counter at last clean_unused pass */
};
49 static inline void *qcow2_cache_get_table_addr(BlockDriverState *bs,
50 Qcow2Cache *c, int table)
52 BDRVQcow2State *s = bs->opaque;
53 return (uint8_t *) c->table_array + (size_t) table * s->cluster_size;
56 static inline int qcow2_cache_get_table_idx(BlockDriverState *bs,
57 Qcow2Cache *c, void *table)
59 BDRVQcow2State *s = bs->opaque;
60 ptrdiff_t table_offset = (uint8_t *) table - (uint8_t *) c->table_array;
61 int idx = table_offset / s->cluster_size;
62 assert(idx >= 0 && idx < c->size && table_offset % s->cluster_size == 0);
63 return idx;
/*
 * Hint to the OS that the memory backing @num_tables cache slots starting at
 * slot @i is no longer needed, so the pages can be reclaimed. Only compiled
 * in when the platform has a real MADV_DONTNEED equivalent.
 */
static void qcow2_cache_table_release(BlockDriverState *bs, Qcow2Cache *c,
                                      int i, int num_tables)
{
/* If MADV_DONTNEED is not available, skip the madvise() call entirely */
#if QEMU_MADV_DONTNEED != QEMU_MADV_INVALID
    BDRVQcow2State *s = bs->opaque;
    void *t = qcow2_cache_get_table_addr(bs, c, i);
    int align = getpagesize();
    size_t mem_size = (size_t) s->cluster_size * num_tables;
    /* madvise() works on whole pages: shrink the range to the page-aligned
     * region fully contained in [t, t + mem_size) */
    size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
    size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);
    if (length > 0) {
        qemu_madvise((uint8_t *) t + offset, length, QEMU_MADV_DONTNEED);
    }
#endif
}
82 static inline bool can_clean_entry(Qcow2Cache *c, int i)
84 Qcow2CachedTable *t = &c->entries[i];
85 return t->ref == 0 && !t->dirty && t->offset != 0 &&
86 t->lru_counter <= c->cache_clean_lru_counter;
/*
 * Free the memory of all unused cache entries (see can_clean_entry()).
 * Consecutive reclaimable entries are released in a single
 * qcow2_cache_table_release() call to minimize madvise() invocations.
 */
void qcow2_cache_clean_unused(BlockDriverState *bs, Qcow2Cache *c)
{
    int i = 0;
    while (i < c->size) {
        int to_clean = 0;

        /* Skip the entries that we don't need to clean */
        while (i < c->size && !can_clean_entry(c, i)) {
            i++;
        }

        /* And count how many we can clean in a row */
        while (i < c->size && can_clean_entry(c, i)) {
            /* Mark the slot empty so future lookups miss it */
            c->entries[i].offset = 0;
            c->entries[i].lru_counter = 0;
            i++;
            to_clean++;
        }

        /* Release the whole run [i - to_clean, i) at once */
        if (to_clean > 0) {
            qcow2_cache_table_release(bs, c, i - to_clean, to_clean);
        }
    }

    /* Entries used after this point are spared on the next pass */
    c->cache_clean_lru_counter = c->lru_counter;
}
116 Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
118 BDRVQcow2State *s = bs->opaque;
119 Qcow2Cache *c;
121 c = g_new0(Qcow2Cache, 1);
122 c->size = num_tables;
123 c->entries = g_try_new0(Qcow2CachedTable, num_tables);
124 c->table_array = qemu_try_blockalign(bs->file->bs,
125 (size_t) num_tables * s->cluster_size);
127 if (!c->entries || !c->table_array) {
128 qemu_vfree(c->table_array);
129 g_free(c->entries);
130 g_free(c);
131 c = NULL;
134 return c;
137 int qcow2_cache_destroy(BlockDriverState *bs, Qcow2Cache *c)
139 int i;
141 for (i = 0; i < c->size; i++) {
142 assert(c->entries[i].ref == 0);
145 qemu_vfree(c->table_array);
146 g_free(c->entries);
147 g_free(c);
149 return 0;
152 static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
154 int ret;
156 ret = qcow2_cache_flush(bs, c->depends);
157 if (ret < 0) {
158 return ret;
161 c->depends = NULL;
162 c->depends_on_flush = false;
164 return 0;
/*
 * Write a single dirty cache entry back to disk.
 *
 * Ordering is important: any dependency (another cache, or a plain flush of
 * bs->file) is resolved first, then the target region is checked against
 * metadata overlaps, and only then is the table written. Returns 0 on
 * success (or if the entry needs no writeback), negative errno on failure.
 */
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcow2State *s = bs->opaque;
    int ret = 0;

    /* Clean or empty entries need no writeback */
    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
                                  c == s->l2_table_cache, i);

    if (c->depends) {
        /* Another cache must hit the disk before this entry may */
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        /* Previously written data must be stable before this entry may */
        ret = bdrv_flush(bs->file->bs);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    /* Refuse to overwrite other qcow2 metadata structures; the check type
     * depends on which cache this is */
    if (c == s->refcount_block_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_BLOCK,
                c->entries[i].offset, s->cluster_size);
    } else if (c == s->l2_table_cache) {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                c->entries[i].offset, s->cluster_size);
    } else {
        ret = qcow2_pre_write_overlap_check(bs, 0,
                c->entries[i].offset, s->cluster_size);
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset,
        qcow2_cache_get_table_addr(bs, c, i), s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* Only mark clean after the write has succeeded */
    c->entries[i].dirty = false;

    return 0;
}
224 int qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c)
226 BDRVQcow2State *s = bs->opaque;
227 int result = 0;
228 int ret;
229 int i;
231 trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);
233 for (i = 0; i < c->size; i++) {
234 ret = qcow2_cache_entry_flush(bs, c, i);
235 if (ret < 0 && result != -ENOSPC) {
236 result = ret;
240 return result;
243 int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
245 int result = qcow2_cache_write(bs, c);
247 if (result == 0) {
248 int ret = bdrv_flush(bs->file->bs);
249 if (ret < 0) {
250 result = ret;
254 return result;
257 int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
258 Qcow2Cache *dependency)
260 int ret;
262 if (dependency->depends) {
263 ret = qcow2_cache_flush_dependency(bs, dependency);
264 if (ret < 0) {
265 return ret;
269 if (c->depends && (c->depends != dependency)) {
270 ret = qcow2_cache_flush_dependency(bs, c);
271 if (ret < 0) {
272 return ret;
276 c->depends = dependency;
277 return 0;
280 void qcow2_cache_depends_on_flush(Qcow2Cache *c)
282 c->depends_on_flush = true;
285 int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
287 int ret, i;
289 ret = qcow2_cache_flush(bs, c);
290 if (ret < 0) {
291 return ret;
294 for (i = 0; i < c->size; i++) {
295 assert(c->entries[i].ref == 0);
296 c->entries[i].offset = 0;
297 c->entries[i].lru_counter = 0;
300 qcow2_cache_table_release(bs, c, 0, c->size);
302 c->lru_counter = 0;
304 return 0;
/*
 * Look up the table at disk @offset in the cache, loading it from disk on a
 * miss (unless @read_from_disk is false, in which case the slot is claimed
 * without I/O). On success *table points at the cached data and the entry's
 * refcount is incremented; release with qcow2_cache_put(). Returns 0 or a
 * negative errno.
 */
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcow2State *s = bs->opaque;
    int i;
    int ret;
    int lookup_index;
    uint64_t min_lru_counter = UINT64_MAX;
    int min_lru_index = -1;

    trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
                          offset, read_from_disk);

    /* Check if the table is already cached */
    i = lookup_index = (offset / s->cluster_size * 4) % c->size;
    do {
        const Qcow2CachedTable *t = &c->entries[i];
        if (t->offset == offset) {
            goto found;
        }
        /* While scanning, also track the least recently used unreferenced
         * entry as a replacement candidate for a potential miss */
        if (t->ref == 0 && t->lru_counter < min_lru_counter) {
            min_lru_counter = t->lru_counter;
            min_lru_index = i;
        }
        if (++i == c->size) {
            i = 0;
        }
    } while (i != lookup_index);

    if (min_lru_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }

    /* Cache miss: write a table back and replace it */
    i = min_lru_index;
    trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
                                        c == s->l2_table_cache, i);
    /* The victim may still be dirty; flush it before reuse */
    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    trace_qcow2_cache_get_read(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);
    /* Invalidate the slot first so a failed read leaves no stale mapping */
    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset,
                         qcow2_cache_get_table_addr(bs, c, i),
                         s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].ref++;
    *table = qcow2_cache_get_table_addr(bs, c, i);

    trace_qcow2_cache_get_done(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);

    return 0;
}
381 int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
382 void **table)
384 return qcow2_cache_do_get(bs, c, offset, table, true);
387 int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
388 void **table)
390 return qcow2_cache_do_get(bs, c, offset, table, false);
393 void qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
395 int i = qcow2_cache_get_table_idx(bs, c, *table);
397 c->entries[i].ref--;
398 *table = NULL;
400 if (c->entries[i].ref == 0) {
401 c->entries[i].lru_counter = ++c->lru_counter;
404 assert(c->entries[i].ref >= 0);
407 void qcow2_cache_entry_mark_dirty(BlockDriverState *bs, Qcow2Cache *c,
408 void *table)
410 int i = qcow2_cache_get_table_idx(bs, c, table);
411 assert(c->entries[i].offset != 0);
412 c->entries[i].dirty = true;