/*
 * L2/refcount table cache for the QCOW2 format
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "block_int.h"
#include "qemu-common.h"
#include "qcow2.h"

typedef struct Qcow2CachedTable {
    void       *table;
    uint64_t    offset;
    bool        dirty;
    int         cache_hits;
    int         ref;
} Qcow2CachedTable;

struct Qcow2Cache {
    Qcow2CachedTable       *entries;
    struct Qcow2Cache      *depends;
    int                     size;
    bool                    depends_on_flush;
    bool                    writethrough;
};

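/*
 * A Qcow2Cache keeps a fixed number of cluster-sized table buffers (L2 tables
 * or refcount blocks) in memory. Lookups scan the entries linearly; when a
 * table is not cached, the least used unreferenced entry is evicted and
 * written back if it is dirty. Write ordering between caches is expressed
 * with the 'depends' pointer.
 *
 * Callers are expected to use the cache roughly as follows (illustrative
 * sketch only; the variable names are assumed, not taken from a real caller):
 *
 *     uint64_t *l2_table;
 *
 *     ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
 *                           (void **) &l2_table);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 *     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
 *     l2_table[l2_index] = cpu_to_be64(cluster_offset);
 *
 *     ret = qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
 */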
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
    bool writethrough)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2Cache *c;
    int i;

    c = g_malloc0(sizeof(*c));
    c->size = num_tables;
    c->entries = g_malloc0(sizeof(*c->entries) * num_tables);
    c->writethrough = writethrough;

    for (i = 0; i < c->size; i++) {
        c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
    }

    return c;
}

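/* Free a cache created by qcow2_cache_create(). All entries must have been
 * released with qcow2_cache_put() before this is called. */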
int qcow2_cache_destroy(BlockDriverState *bs, Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        qemu_vfree(c->entries[i].table);
    }

    g_free(c->entries);
    g_free(c);

    return 0;
}

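/* Flush the cache that c depends on and clear the dependency, so that c's own
 * tables may be written back afterwards. */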
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret;

    ret = qcow2_cache_flush(bs, c->depends);
    if (ret < 0) {
        return ret;
    }

    c->depends = NULL;
    c->depends_on_flush = false;

    return 0;
}

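/* Write back a single cache entry if it is dirty. Any dependency (another
 * cache, or a pending flush of the image file) is honoured before the table
 * is written. */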
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
    BDRVQcowState *s = bs->opaque;
    int ret = 0;

    if (!c->entries[i].dirty || !c->entries[i].offset) {
        return 0;
    }

    if (c->depends) {
        ret = qcow2_cache_flush_dependency(bs, c);
    } else if (c->depends_on_flush) {
        ret = bdrv_flush(bs->file);
        if (ret >= 0) {
            c->depends_on_flush = false;
        }
    }

    if (ret < 0) {
        return ret;
    }

    if (c == s->refcount_block_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
    } else if (c == s->l2_table_cache) {
        BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    }

    ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->entries[i].table,
        s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].dirty = false;

    return 0;
}

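/* Write back all dirty entries of the cache and then flush the image file.
 * If several entries fail, an -ENOSPC error takes precedence in the result. */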
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    int result = 0;
    int ret;
    int i;

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    if (result == 0) {
        ret = bdrv_flush(bs->file);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}

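/* Make c depend on dependency: before any table from c is written back,
 * dependency is flushed first. An existing dependency on a different cache is
 * flushed and replaced. */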
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
    Qcow2Cache *dependency)
{
    int ret;

    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}

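/* Require a flush of the image file before any dirty entry of this cache may
 * be written back. */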
void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}

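/* Pick a victim entry for replacement: the unreferenced entry with the fewest
 * cache hits. Hit counters are halved on every scan so that old hits age out
 * and recently used tables are preferred. */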
static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
{
    int i;
    int min_count = INT_MAX;
    int min_index = -1;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }

        if (c->entries[i].cache_hits < min_count) {
            min_index = i;
            min_count = c->entries[i].cache_hits;
        }

        /* Give newer hits priority */
        /* TODO Check how to optimize the replacement strategy */
        c->entries[i].cache_hits /= 2;
    }

    if (min_index == -1) {
        /* This can't happen in current synchronous code, but leave the check
         * here as a reminder for whoever starts using AIO with the cache */
        abort();
    }

    return min_index;
}

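/* Look up the table at the given offset. If it is not cached, a victim entry
 * is written back and reused; with read_from_disk set, the table contents are
 * read from the image file. The entry's reference count is increased and must
 * be dropped again with qcow2_cache_put(). */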
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
    uint64_t offset, void **table, bool read_from_disk)
{
    BDRVQcowState *s = bs->opaque;
    int i;
    int ret;

    /* Check if the table is already cached */
    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            goto found;
        }
    }

    /* If not, write a table back and replace it */
    i = qcow2_cache_find_entry_to_replace(c);
    if (i < 0) {
        return i;
    }

    ret = qcow2_cache_entry_flush(bs, c, i);
    if (ret < 0) {
        return ret;
    }

    c->entries[i].offset = 0;
    if (read_from_disk) {
        if (c == s->l2_table_cache) {
            BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
        }

        ret = bdrv_pread(bs->file, offset, c->entries[i].table,
            s->cluster_size);
        if (ret < 0) {
            return ret;
        }
    }

    /* Give the table some hits for the start so that it won't be replaced
     * immediately. The number 32 is completely arbitrary. */
    c->entries[i].cache_hits = 32;
    c->entries[i].offset = offset;

    /* And return the right table */
found:
    c->entries[i].cache_hits++;
    c->entries[i].ref++;
    *table = c->entries[i].table;

    return 0;
}

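/* qcow2_cache_get() reads the table from disk if it is not yet cached;
 * qcow2_cache_get_empty() only reserves a cache entry, for a table that the
 * caller is going to initialise completely (typically a newly allocated
 * table). */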
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}

int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}

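/* Release a table obtained with qcow2_cache_get() or qcow2_cache_get_empty().
 * In writethrough mode the entry is flushed to disk immediately. */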
int qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == *table) {
            goto found;
        }
    }
    return -ENOENT;

found:
    c->entries[i].ref--;
    *table = NULL;

    assert(c->entries[i].ref >= 0);

    if (c->writethrough) {
        return qcow2_cache_entry_flush(bs, c, i);
    } else {
        return 0;
    }
}

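/* Mark a cached table as modified so that it is written back by the next
 * flush (or immediately on qcow2_cache_put() in writethrough mode). */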
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].table == table) {
            goto found;
        }
    }
    abort();

found:
    c->entries[i].dirty = true;
}

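/* Switch the cache between writeback and writethrough mode. Enabling
 * writethrough flushes all dirty entries first. Returns the previous mode. */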
bool qcow2_cache_set_writethrough(BlockDriverState *bs, Qcow2Cache *c,
    bool enable)
{
    bool old = c->writethrough;

    if (!old && enable) {
        qcow2_cache_flush(bs, c);
    }

    c->writethrough = enable;
    return old;
}
;