/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "block/block_int.h"
26 #include "qemu-common.h"
/*
 * NOTE(review): extraction-garbled fragment.  The bare decimal prefixes
 * (30, 39, 40, 42) are original line numbers fused into the text.  The
 * field list of Qcow2CachedTable and the "struct Qcow2Cache {" opener
 * (with its other fields, e.g. size) are missing from this view -- the
 * fields below appear to belong to struct Qcow2Cache; restore from the
 * original file.
 */
30 typedef struct Qcow2CachedTable
{
/* array of cached table entries, one slot per table */
39 Qcow2CachedTable
* entries
;
/* cache that must be flushed before entries of this one are written back
 * (see qcow2_cache_flush_dependency / qcow2_cache_set_dependency) */
40 struct Qcow2Cache
* depends
;
/* when true, a bdrv_flush() is required before writeback
 * (see qcow2_cache_entry_flush) */
42 bool depends_on_flush
;
/*
 * qcow2_cache_create: allocates a Qcow2Cache with @num_tables entry slots
 * and one cluster-sized table buffer per slot.
 *
 * NOTE(review): extraction-garbled fragment -- the declarations of c and i,
 * the initialization of c->size, the return statement and closing braces
 * are missing from this view; restore from the original file.
 */
45 Qcow2Cache
*qcow2_cache_create(BlockDriverState
*bs
, int num_tables
)
/* s is needed only for the per-table buffer size (s->cluster_size) */
47 BDRVQcowState
*s
= bs
->opaque
;
/* zero-initialize the cache struct ... */
51 c
= g_malloc0(sizeof(*c
));
/* ... and the entry array, one slot per table */
53 c
->entries
= g_malloc0(sizeof(*c
->entries
) * num_tables
);
/* allocate an aligned, cluster-sized buffer for each table
 * (qemu_blockalign -- presumably for O_DIRECT-safe alignment; confirm) */
55 for (i
= 0; i
< c
->size
; i
++) {
56 c
->entries
[i
].table
= qemu_blockalign(bs
, s
->cluster_size
);
/*
 * qcow2_cache_destroy: releases the per-entry table buffers of @c.  Every
 * entry must be unreferenced (ref == 0) when this is called.
 *
 * NOTE(review): extraction-garbled fragment -- the freeing of c->entries
 * and c themselves, and the return statement, are not visible here.
 */
62 int qcow2_cache_destroy(BlockDriverState
* bs
, Qcow2Cache
*c
)
66 for (i
= 0; i
< c
->size
; i
++) {
/* callers must have dropped all references before destroying the cache */
67 assert(c
->entries
[i
].ref
== 0);
/* buffers came from qemu_blockalign(), so qemu_vfree() is the matching free */
68 qemu_vfree(c
->entries
[i
].table
);
/*
 * qcow2_cache_flush_dependency: flushes the cache that @c depends on, so
 * that entries of @c may subsequently be written back in the right order.
 *
 * NOTE(review): extraction-garbled fragment -- the error check on ret and
 * the clearing of c->depends are missing from this view.
 */
77 static int qcow2_cache_flush_dependency(BlockDriverState
*bs
, Qcow2Cache
*c
)
81 ret
= qcow2_cache_flush(bs
, c
->depends
);
/* the dependency's flush also satisfies any pending flush requirement */
87 c
->depends_on_flush
= false;
/*
 * qcow2_cache_entry_flush: writes cache entry @i of @c back to the image
 * file if it is dirty.  Write ordering is honoured first (flush a
 * dependent cache, or bdrv_flush() when depends_on_flush is set), then an
 * overlap check guards the write, then the table is written with
 * bdrv_pwrite() and the entry is marked clean.
 *
 * NOTE(review): extraction-garbled fragment -- error-return checks, the
 * condition selecting the dependency branch, the write size argument and
 * the final return are missing from this view.
 */
92 static int qcow2_cache_entry_flush(BlockDriverState
*bs
, Qcow2Cache
*c
, int i
)
94 BDRVQcowState
*s
= bs
->opaque
;
/* clean or never-assigned entries need no writeback */
97 if (!c
->entries
[i
].dirty
|| !c
->entries
[i
].offset
) {
101 trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
102 c
== s
->l2_table_cache
, i
);
/* write ordering: flush whatever this cache depends on first */
105 ret
= qcow2_cache_flush_dependency(bs
, c
);
106 } else if (c
->depends_on_flush
) {
/* a plain flush of the image file is sufficient in this case */
107 ret
= bdrv_flush(bs
->file
);
109 c
->depends_on_flush
= false;
/* refuse to overwrite other metadata: pick the overlap-check mask that
 * excludes the structure we are legitimately writing */
117 if (c
== s
->refcount_block_cache
) {
118 ret
= qcow2_pre_write_overlap_check(bs
, QCOW2_OL_REFCOUNT_BLOCK
,
119 c
->entries
[i
].offset
, s
->cluster_size
);
120 } else if (c
== s
->l2_table_cache
) {
121 ret
= qcow2_pre_write_overlap_check(bs
, QCOW2_OL_ACTIVE_L2
,
122 c
->entries
[i
].offset
, s
->cluster_size
);
124 ret
= qcow2_pre_write_overlap_check(bs
, 0,
125 c
->entries
[i
].offset
, s
->cluster_size
);
/* blkdebug event matching the kind of table being written */
132 if (c
== s
->refcount_block_cache
) {
133 BLKDBG_EVENT(bs
->file
, BLKDBG_REFBLOCK_UPDATE_PART
);
134 } else if (c
== s
->l2_table_cache
) {
135 BLKDBG_EVENT(bs
->file
, BLKDBG_L2_UPDATE
);
/* write the table back to its location in the image file */
138 ret
= bdrv_pwrite(bs
->file
, c
->entries
[i
].offset
, c
->entries
[i
].table
,
/* the entry is clean again */
144 c
->entries
[i
].dirty
= false;
/*
 * qcow2_cache_flush: writes back all dirty entries of @c and then flushes
 * the image file.  The loop keeps flushing remaining entries even after an
 * error; judging by the visible condition, an -ENOSPC stored in result is
 * not overwritten by later errors.
 *
 * NOTE(review): extraction-garbled fragment -- the declarations of i, ret
 * and result, the assignment inside the error branch, and the return are
 * missing from this view.
 */
149 int qcow2_cache_flush(BlockDriverState
*bs
, Qcow2Cache
*c
)
151 BDRVQcowState
*s
= bs
->opaque
;
156 trace_qcow2_cache_flush(qemu_coroutine_self(), c
== s
->l2_table_cache
);
/* flush every entry; errors are collected, not fatal to the loop */
158 for (i
= 0; i
< c
->size
; i
++) {
159 ret
= qcow2_cache_entry_flush(bs
, c
, i
);
160 if (ret
< 0 && result
!= -ENOSPC
) {
/* make the writes stable on disk */
166 ret
= bdrv_flush(bs
->file
);
/*
 * qcow2_cache_set_dependency: makes @c depend on @dependency, i.e. entries
 * of @c may only be written back after @dependency has been flushed.  Only
 * one dependency per cache is kept: a different existing dependency of @c
 * is flushed first, and a transitive dependency of @dependency itself is
 * resolved up front.
 *
 * NOTE(review): extraction-garbled fragment -- the error checks on ret and
 * the return statement are missing from this view.
 */
175 int qcow2_cache_set_dependency(BlockDriverState
*bs
, Qcow2Cache
*c
,
176 Qcow2Cache
*dependency
)
/* avoid dependency chains: resolve the dependency's own dependency first */
180 if (dependency
->depends
) {
181 ret
= qcow2_cache_flush_dependency(bs
, dependency
);
/* only a single dependency is supported; flush a conflicting one */
187 if (c
->depends
&& (c
->depends
!= dependency
)) {
188 ret
= qcow2_cache_flush_dependency(bs
, c
);
194 c
->depends
= dependency
;
198 void qcow2_cache_depends_on_flush(Qcow2Cache
*c
)
200 c
->depends_on_flush
= true;
/*
 * qcow2_cache_empty: flushes @c and then invalidates every entry so the
 * cache no longer holds any table.  All entries must be unreferenced.
 *
 * NOTE(review): extraction-garbled fragment -- the error check after the
 * flush and the return statement are missing from this view.
 */
203 int qcow2_cache_empty(BlockDriverState
*bs
, Qcow2Cache
*c
)
/* dirty entries must hit the disk before they are dropped */
207 ret
= qcow2_cache_flush(bs
, c
);
212 for (i
= 0; i
< c
->size
; i
++) {
/* no entry may still be in use by a caller */
213 assert(c
->entries
[i
].ref
== 0);
/* offset 0 marks the slot as unoccupied (see the dirty/offset test in
 * qcow2_cache_entry_flush) */
214 c
->entries
[i
].offset
= 0;
215 c
->entries
[i
].cache_hits
= 0;
/*
 * qcow2_cache_find_entry_to_replace: picks the slot to evict -- the
 * unreferenced entry with the fewest cache hits (LFU-style).  Hit counters
 * are halved during the scan so that old hits age out and recent usage is
 * favoured.
 *
 * NOTE(review): extraction-garbled fragment -- the declarations of i and
 * min_index, the assignment of min_index, the skip of referenced entries
 * and the return statements are missing from this view.
 */
221 static int qcow2_cache_find_entry_to_replace(Qcow2Cache
*c
)
224 int min_count
= INT_MAX
;
228 for (i
= 0; i
< c
->size
; i
++) {
/* entries still referenced by a caller are not eviction candidates */
229 if (c
->entries
[i
].ref
) {
/* track the least-hit unreferenced entry seen so far */
233 if (c
->entries
[i
].cache_hits
< min_count
) {
235 min_count
= c
->entries
[i
].cache_hits
;
238 /* Give newer hits priority */
239 /* TODO Check how to optimize the replacement strategy */
240 c
->entries
[i
].cache_hits
/= 2;
243 if (min_index
== -1) {
244 /* This can't happen in current synchronous code, but leave the check
245 * here as a reminder for whoever starts using AIO with the cache */
/*
 * qcow2_cache_do_get: common lookup path for qcow2_cache_get() and
 * qcow2_cache_get_empty().  Searches the cache for @offset; on a miss a
 * victim slot is chosen, flushed if dirty, and -- when @read_from_disk --
 * refilled from the image file.  A pointer to the table buffer is returned
 * in *@table.
 *
 * NOTE(review): extraction-garbled fragment -- the found-path jump, the
 * reference-count increment, error handling after flush/read, and the
 * return statements are missing from this view.
 */
251 static int qcow2_cache_do_get(BlockDriverState
*bs
, Qcow2Cache
*c
,
252 uint64_t offset
, void **table
, bool read_from_disk
)
254 BDRVQcowState
*s
= bs
->opaque
;
258 trace_qcow2_cache_get(qemu_coroutine_self(), c
== s
->l2_table_cache
,
259 offset
, read_from_disk
);
261 /* Check if the table is already cached */
262 for (i
= 0; i
< c
->size
; i
++) {
263 if (c
->entries
[i
].offset
== offset
) {
268 /* If not, write a table back and replace it */
269 i
= qcow2_cache_find_entry_to_replace(c
);
270 trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
271 c
== s
->l2_table_cache
, i
);
/* the victim may still be dirty; write it back before reuse */
276 ret
= qcow2_cache_entry_flush(bs
, c
, i
);
281 trace_qcow2_cache_get_read(qemu_coroutine_self(),
282 c
== s
->l2_table_cache
, i
);
/* invalidate the slot while its contents are in flux */
283 c
->entries
[i
].offset
= 0;
284 if (read_from_disk
) {
285 if (c
== s
->l2_table_cache
) {
286 BLKDBG_EVENT(bs
->file
, BLKDBG_L2_LOAD
);
/* read the new table from its location in the image file */
289 ret
= bdrv_pread(bs
->file
, offset
, c
->entries
[i
].table
, s
->cluster_size
);
295 /* Give the table some hits for the start so that it won't be replaced
296 * immediately. The number 32 is completely arbitrary. */
297 c
->entries
[i
].cache_hits
= 32;
298 c
->entries
[i
].offset
= offset
;
300 /* And return the right table */
/* both the hit and miss paths converge here */
302 c
->entries
[i
].cache_hits
++;
304 *table
= c
->entries
[i
].table
;
306 trace_qcow2_cache_get_done(qemu_coroutine_self(),
307 c
== s
->l2_table_cache
, i
);
312 int qcow2_cache_get(BlockDriverState
*bs
, Qcow2Cache
*c
, uint64_t offset
,
315 return qcow2_cache_do_get(bs
, c
, offset
, table
, true);
318 int qcow2_cache_get_empty(BlockDriverState
*bs
, Qcow2Cache
*c
, uint64_t offset
,
321 return qcow2_cache_do_get(bs
, c
, offset
, table
, false);
/*
 * qcow2_cache_put: releases a reference to the cached table pointed to by
 * *@table, located by comparing the pointer against each entry's buffer.
 *
 * NOTE(review): extraction-garbled fragment -- the not-found handling, the
 * reference-count decrement, the clearing of *table and the return are
 * missing from this view.
 */
324 int qcow2_cache_put(BlockDriverState
*bs
, Qcow2Cache
*c
, void **table
)
/* find the entry owning this buffer */
328 for (i
= 0; i
< c
->size
; i
++) {
329 if (c
->entries
[i
].table
== *table
) {
/* the (not visible) decrement must never drive ref negative */
339 assert(c
->entries
[i
].ref
>= 0);
/*
 * qcow2_cache_entry_mark_dirty: marks the cache entry whose buffer is
 * @table as dirty, so it will be written back by the next flush.
 *
 * NOTE(review): extraction-garbled fragment, truncated at the end of this
 * chunk -- the not-found handling between the lookup loop and the dirty
 * assignment, and the closing braces, are not visible here.
 */
343 void qcow2_cache_entry_mark_dirty(Qcow2Cache
*c
, void *table
)
/* find the entry owning this buffer */
347 for (i
= 0; i
< c
->size
; i
++) {
348 if (c
->entries
[i
].table
== table
) {
355 c
->entries
[i
].dirty
= true;