2 * L2/refcount table cache for the QCOW2 format
4 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "block/block_int.h"
26 #include "qemu-common.h"
/*
 * Qcow2CachedTable: one slot of the table cache. The field list is
 * partially elided in this extract; visible uses elsewhere in the file
 * show at least .table, .offset, .dirty, .cache_hits and .ref members.
 */
30 typedef struct Qcow2CachedTable
{
/* Array of cached table slots, allocated in qcow2_cache_create(). */
39 Qcow2CachedTable
* entries
;
/* Another cache that must be flushed before this one to keep write
 * ordering (see qcow2_cache_flush_dependency()). */
40 struct Qcow2Cache
* depends
;
/* When true, bdrv_flush(bs->file) is issued before the next dirty entry
 * is written out (see qcow2_cache_entry_flush()). */
42 bool depends_on_flush
;
/*
 * qcow2_cache_create: allocate a cache holding num_tables entries, each
 * backed by a cluster-sized buffer from qemu_try_blockalign().
 *
 * NOTE(review): interior lines of this extract are elided (the embedded
 * original line numbers jump); comments describe the visible statements only.
 */
45 Qcow2Cache
*qcow2_cache_create(BlockDriverState
*bs
, int num_tables
)
47 BDRVQcowState
*s
= bs
->opaque
;
/* Zero-initialized container, then zero-initialized entry array. */
51 c
= g_malloc0(sizeof(*c
));
53 c
->entries
= g_malloc0(sizeof(*c
->entries
) * num_tables
);
/* One cluster-sized, alignment-friendly buffer per cache slot. */
55 for (i
= 0; i
< c
->size
; i
++) {
56 c
->entries
[i
].table
= qemu_try_blockalign(bs
->file
, s
->cluster_size
);
/* qemu_try_blockalign() may fail — handle allocation failure. */
57 if (c
->entries
[i
].table
== NULL
) {
/* Failure path (body elided): free every table buffer allocated so far. */
65 for (i
= 0; i
< c
->size
; i
++) {
66 qemu_vfree(c
->entries
[i
].table
);
/*
 * qcow2_cache_destroy: release all table buffers of a cache. Every entry
 * must be unreferenced (ref == 0) — callers must have put all tables back.
 * (Remaining teardown of c itself is elided in this extract.)
 */
73 int qcow2_cache_destroy(BlockDriverState
* bs
, Qcow2Cache
*c
)
77 for (i
= 0; i
< c
->size
; i
++) {
/* A live reference here would mean a get/put imbalance in a caller. */
78 assert(c
->entries
[i
].ref
== 0);
79 qemu_vfree(c
->entries
[i
].table
);
/*
 * qcow2_cache_flush_dependency: flush the cache that c depends on, so
 * that writes from this cache cannot hit the disk before the dependency's.
 * On success (elided lines presumably clear c->depends) the flush-pending
 * flag is also reset.
 */
88 static int qcow2_cache_flush_dependency(BlockDriverState
*bs
, Qcow2Cache
*c
)
92 ret
= qcow2_cache_flush(bs
, c
->depends
);
/* Dependency flushed; a separate pre-write bdrv_flush is no longer needed. */
98 c
->depends_on_flush
= false;
/*
 * qcow2_cache_entry_flush: write one cache entry back to the image file.
 * Entries that are clean, or that never got an offset assigned
 * (offset == 0), are skipped (early-out body elided in this extract).
 */
103 static int qcow2_cache_entry_flush(BlockDriverState
*bs
, Qcow2Cache
*c
, int i
)
105 BDRVQcowState
*s
= bs
->opaque
;
/* Nothing to do for clean or unassigned entries. */
108 if (!c
->entries
[i
].dirty
|| !c
->entries
[i
].offset
) {
112 trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
113 c
== s
->l2_table_cache
, i
)
;
/* Honor write ordering: flush the dependency cache first (condition
 * guarding this call is elided). */
116 ret
= qcow2_cache_flush_dependency(bs
, c
);
/* Otherwise, if a plain flush was requested, sync the file before writing. */
117 } else if (c
->depends_on_flush
) {
118 ret
= bdrv_flush(bs
->file
);
120 c
->depends_on_flush
= false;
/* Overlap check: the region we are about to write must not collide with
 * other metadata. The check type depends on which cache this is. */
128 if (c
== s
->refcount_block_cache
) {
129 ret
= qcow2_pre_write_overlap_check(bs
, QCOW2_OL_REFCOUNT_BLOCK
,
130 c
->entries
[i
].offset
, s
->cluster_size
);
131 } else if (c
== s
->l2_table_cache
) {
132 ret
= qcow2_pre_write_overlap_check(bs
, QCOW2_OL_ACTIVE_L2
,
133 c
->entries
[i
].offset
, s
->cluster_size
);
/* Fallback: generic overlap check for any other cache user. */
135 ret
= qcow2_pre_write_overlap_check(bs
, 0,
136 c
->entries
[i
].offset
, s
->cluster_size
);
/* Block-debug events let blkdebug scripts fault-inject these writes. */
143 if (c
== s
->refcount_block_cache
) {
144 BLKDBG_EVENT(bs
->file
, BLKDBG_REFBLOCK_UPDATE_PART
);
145 } else if (c
== s
->l2_table_cache
) {
146 BLKDBG_EVENT(bs
->file
, BLKDBG_L2_UPDATE
);
/* Write the table back to its location in the image file
 * (size argument on an elided continuation line). */
149 ret
= bdrv_pwrite(bs
->file
, c
->entries
[i
].offset
, c
->entries
[i
].table
,
/* Write succeeded (error path elided): entry is clean again. */
155 c
->entries
[i
].dirty
= false;
/*
 * qcow2_cache_flush: write back every entry of the cache, then flush the
 * underlying file. Errors are accumulated in `result` (declared on an
 * elided line); an -ENOSPC already recorded is never overwritten by a
 * later, different error.
 */
160 int qcow2_cache_flush(BlockDriverState
*bs
, Qcow2Cache
*c
)
162 BDRVQcowState
*s
= bs
->opaque
;
167 trace_qcow2_cache_flush(qemu_coroutine_self(), c
== s
->l2_table_cache
);
/* Flush each entry individually; keep going on error so all clean
 * entries still reach the disk. */
169 for (i
= 0; i
< c
->size
; i
++) {
170 ret
= qcow2_cache_entry_flush(bs
, c
, i
);
/* Record the first error, but keep -ENOSPC sticky once seen. */
171 if (ret
< 0 && result
!= -ENOSPC
) {
/* Sync the file so the writes are actually stable (guard elided). */
177 ret
= bdrv_flush(bs
->file
);
/*
 * qcow2_cache_set_dependency: require that `dependency` is flushed before
 * any entry of `c` is written. Dependency chains are avoided by flushing
 * eagerly instead of linking them.
 */
186 int qcow2_cache_set_dependency(BlockDriverState
*bs
, Qcow2Cache
*c
,
187 Qcow2Cache
*dependency
)
/* No chains: if the dependency itself has a dependency, resolve it now. */
191 if (dependency
->depends
) {
192 ret
= qcow2_cache_flush_dependency(bs
, dependency
);
/* Only one dependency at a time: flush any different existing one. */
198 if (c
->depends
&& (c
->depends
!= dependency
)) {
199 ret
= qcow2_cache_flush_dependency(bs
, c
);
205 c
->depends
= dependency
;
/*
 * qcow2_cache_depends_on_flush: mark the cache so that the next dirty
 * entry write is preceded by a bdrv_flush of the image file
 * (consumed in qcow2_cache_entry_flush()).
 */
209 void qcow2_cache_depends_on_flush(Qcow2Cache
*c
)
211 c
->depends_on_flush
= true;
/*
 * qcow2_cache_empty: write back all entries, then invalidate the whole
 * cache so no table remains associated with an image offset. All entries
 * must be unreferenced.
 */
214 int qcow2_cache_empty(BlockDriverState
*bs
, Qcow2Cache
*c
)
/* Flush first — invalidating a dirty entry would lose its contents. */
218 ret
= qcow2_cache_flush(bs
, c
);
223 for (i
= 0; i
< c
->size
; i
++) {
224 assert(c
->entries
[i
].ref
== 0);
/* offset == 0 marks a slot as unassigned; reset its hit counter too. */
225 c
->entries
[i
].offset
= 0;
226 c
->entries
[i
].cache_hits
= 0;
/*
 * qcow2_cache_find_entry_to_replace: pick the eviction victim — the
 * unreferenced entry with the fewest cache hits (min_index assignment on
 * an elided line). Hit counters of scanned unreferenced entries are halved
 * each pass so recent hits outweigh old ones.
 */
232 static int qcow2_cache_find_entry_to_replace(Qcow2Cache
*c
)
235 int min_count
= INT_MAX
;
239 for (i
= 0; i
< c
->size
; i
++) {
/* Entries still referenced by a caller can never be evicted. */
240 if (c
->entries
[i
].ref
) {
244 if (c
->entries
[i
].cache_hits
< min_count
) {
246 min_count
= c
->entries
[i
].cache_hits
;
249 /* Give newer hits priority */
250 /* TODO Check how to optimize the replacement strategy */
251 c
->entries
[i
].cache_hits
/= 2;
/* min_index == -1 would mean every entry is referenced. */
254 if (min_index
== -1) {
255 /* This can't happen in current synchronous code, but leave the check
256 * here as a reminder for whoever starts using AIO with the cache */
/*
 * qcow2_cache_do_get: common implementation of qcow2_cache_get() and
 * qcow2_cache_get_empty(). Returns (via *table) a cached copy of the
 * cluster-sized table at `offset`, loading it from disk only when
 * read_from_disk is true. Reference counting of the returned entry is on
 * elided lines — NOTE(review): comments below cover visible code only.
 */
262 static int qcow2_cache_do_get(BlockDriverState
*bs
, Qcow2Cache
*c
,
263 uint64_t offset
, void **table
, bool read_from_disk
)
265 BDRVQcowState
*s
= bs
->opaque
;
269 trace_qcow2_cache_get(qemu_coroutine_self(), c
== s
->l2_table_cache
,
270 offset
, read_from_disk
);
272 /* Check if the table is already cached */
273 for (i
= 0; i
< c
->size
; i
++) {
274 if (c
->entries
[i
].offset
== offset
) {
279 /* If not, write a table back and replace it */
280 i
= qcow2_cache_find_entry_to_replace(c
);
281 trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
282 c
== s
->l2_table_cache
, i
);
/* The victim may be dirty: write it back before reuse. */
287 ret
= qcow2_cache_entry_flush(bs
, c
, i
);
292 trace_qcow2_cache_get_read(qemu_coroutine_self(),
293 c
== s
->l2_table_cache
, i
);
/* Mark the slot unassigned while its contents are in flux. */
294 c
->entries
[i
].offset
= 0;
295 if (read_from_disk
) {
296 if (c
== s
->l2_table_cache
) {
297 BLKDBG_EVENT(bs
->file
, BLKDBG_L2_LOAD
);
/* Populate the slot with the table's on-disk contents. */
300 ret
= bdrv_pread(bs
->file
, offset
, c
->entries
[i
].table
, s
->cluster_size
);
306 /* Give the table some hits for the start so that it won't be replaced
307 * immediately. The number 32 is completely arbitrary. */
308 c
->entries
[i
].cache_hits
= 32;
309 c
->entries
[i
].offset
= offset
;
311 /* And return the right table */
313 c
->entries
[i
].cache_hits
++;
315 *table
= c
->entries
[i
].table
;
317 trace_qcow2_cache_get_done(qemu_coroutine_self(),
318 c
== s
->l2_table_cache
, i
);
/*
 * qcow2_cache_get: fetch the table at `offset`, reading it from disk on a
 * cache miss (read_from_disk = true). Thin wrapper over
 * qcow2_cache_do_get(); the **table out-parameter is on an elided line.
 */
323 int qcow2_cache_get(BlockDriverState
*bs
, Qcow2Cache
*c
, uint64_t offset
,
326 return qcow2_cache_do_get(bs
, c
, offset
, table
, true);
/*
 * qcow2_cache_get_empty: like qcow2_cache_get(), but skips the disk read
 * (read_from_disk = false) — for tables the caller will overwrite entirely.
 */
329 int qcow2_cache_get_empty(BlockDriverState
*bs
, Qcow2Cache
*c
, uint64_t offset
,
332 return qcow2_cache_do_get(bs
, c
, offset
, table
, false);
/*
 * qcow2_cache_put: return a table obtained via qcow2_cache_get*(). The
 * matching entry is found by comparing table pointers; the reference-count
 * decrement itself is on an elided line.
 */
335 int qcow2_cache_put(BlockDriverState
*bs
, Qcow2Cache
*c
, void **table
)
339 for (i
= 0; i
< c
->size
; i
++) {
340 if (c
->entries
[i
].table
== *table
) {
/* A negative count would mean more puts than gets. */
350 assert(c
->entries
[i
].ref
>= 0);
/*
 * qcow2_cache_entry_mark_dirty: flag the cache entry backing `table` as
 * modified so a later qcow2_cache_flush() writes it back to the image.
 * The entry is located by comparing table pointers.
 */
354 void qcow2_cache_entry_mark_dirty(Qcow2Cache
*c
, void *table
)
358 for (i
= 0; i
< c
->size
; i
++) {
359 if (c
->entries
[i
].table
== table
) {
366 c
->entries
[i
].dirty
= true;