/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "block/block_int.h"
#include "qemu-common.h"
#include "qcow2.h"
#include "trace.h"
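
/*
 * The cache is a fixed-size array of cluster-sized table buffers.  Lookups
 * scan the array linearly; on a miss, the victim is chosen by an approximate
 * LFU policy (cache_hits, halved on every victim search).  Entries are
 * reference-counted while callers hold them, and a cache can depend on
 * another cache (or on a flush of bs->file) that must complete before its
 * own dirty entries may be written back.
 */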
typedef struct Qcow2CachedTable {
    void       *table;
    int64_t     offset;
    bool        dirty;
    int         cache_hits;
    int         ref;
} Qcow2CachedTable;

struct Qcow2Cache {
    Qcow2CachedTable       *entries;
    struct Qcow2Cache      *depends;
    int                     size;
    bool                    depends_on_flush;
};
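
/* Allocates a cache with num_tables entries, each backed by one
 * cluster-sized, block-aligned table buffer. */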
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c;
    int i;

    c = g_malloc0(sizeof(*c));
    c->size = num_tables;
    c->entries = g_malloc0(sizeof(*c->entries) * num_tables);

    for (i = 0; i < c->size; i++) {
        c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}
int qcow2_cache_destroy(BlockDriverState *bs, Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        qemu_vfree(c->entries[i].table);
    }

    g_free(c->entries);
    g_free(c);

    return 0;
}
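
/* Flushes the cache that c depends on and clears the dependency, so that
 * c's own dirty entries may be written back afterwards. */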
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret;

    ret = qcow2_cache_flush(bs, c->depends);
    if (ret < 0) {
        return ret;
    }

    c->depends = NULL;
    c->depends_on_flush = false;

    return 0;
}
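
/* Writes a single dirty cache entry back to the image file.  Any pending
 * dependency (another cache or a flush of bs->file) is satisfied first, and
 * the write is validated with an overlap check before it is issued. */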
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcowState *s = bs->opaque;
    int ret = 0;

    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
                                  c == s->l2_table_cache, i);

    if (c->depends) {
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        ret = bdrv_flush(bs->file);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        ret = qcow2_pre_write_overlap_check(bs,
                QCOW2_OL_DEFAULT & ~QCOW2_OL_REFCOUNT_BLOCK,
                c->entries[i].offset, s->cluster_size);
    } else if (c == s->l2_table_cache) {
        ret = qcow2_pre_write_overlap_check(bs,
                QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L2,
                c->entries[i].offset, s->cluster_size);
    } else {
        ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
                c->entries[i].offset, s->cluster_size);
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->entries[i].table,
        s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].dirty = false;

    return 0;
}
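
/* Writes back all dirty entries of the cache and then flushes the underlying
 * file.  -ENOSPC is kept as the result even if later entries fail with a
 * different error, so callers can handle the out-of-space case specially. */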
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    BDRVQcowState *s = bs->opaque;
    int result = 0;
    int ret;
    int i;

    trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    if (result == 0) {
        ret = bdrv_flush(bs->file);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}
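
/* Makes c depend on another cache: c's dirty entries may only be written
 * back after the dependency has been flushed.  Only one dependency is
 * tracked per cache, so a conflicting older dependency is flushed right
 * away. */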
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
    Qcow2Cache *dependency)
{
    int ret;

    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}
void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}
int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret, i;

    ret = qcow2_cache_flush(bs, c);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        c->entries[i].offset = 0;
        c->entries[i].cache_hits = 0;
    }

    return 0;
}
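
/* Selects the entry with the fewest cache hits among those that are not
 * currently referenced.  Hit counters are halved during the scan so that
 * recent hits outweigh old ones. */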
static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
{
    int i;
    int min_count = INT_MAX;
    int min_index = -1;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }

        if (c->entries[i].cache_hits < min_count) {
            min_index = i;
            min_count = c->entries[i].cache_hits;
        }

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }
    return min_index;
}
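
/* Common lookup path: returns the cached table for the given offset,
 * replacing (and flushing) the least used unreferenced entry on a miss and
 * optionally reading the table from disk.  The entry stays referenced until
 * qcow2_cache_put() releases it. */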
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcowState *s = bs->opaque;
    int i;
    int ret;

    trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
                          offset, read_from_disk);

    /* Check if the table is already cached */
    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            goto found;
        }
    }

    /* If not, write a table back and replace it */
    i = qcow2_cache_find_entry_to_replace(c);
    trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
                                        c == s->l2_table_cache, i);
    if (i < 0) {
        return i;
    }

    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    trace_qcow2_cache_get_read(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);
    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset, c->entries[i].table,
                         s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Give the table some hits for the start so that it won't be replaced
     * immediately. The number 32 is completely arbitrary. */
    c->entries[i].cache_hits = 32;
    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].cache_hits++;
    c->entries[i].ref++;
    *table = c->entries[i].table;

    trace_qcow2_cache_get_done(qemu_coroutine_self(),
                               c == s->l2_table_cache, i);

    return 0;
}
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}
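
/* Releases a table obtained with qcow2_cache_get() or
 * qcow2_cache_get_empty() and drops its reference. */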
int qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == *table) {
            goto found;
        }
    }
    return -ENOENT;

found:
    c->entries[i].ref--;
    *table = NULL;

    assert(c->entries[i].ref >= 0);
    return 0;
}
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == table) {
            goto found;
        }
    }
    abort();

found:
    c->entries[i].dirty = true;
}