/*
 * QEMU Enhanced Disk Format Consistency Check
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qed.h"

typedef struct {
    BDRVQEDState *s;
    BdrvCheckResult *result;
    bool fix;                           /* whether to fix invalid offsets */

    uint64_t nclusters;
    uint32_t *used_clusters;            /* referenced cluster bitmap */

    QEDRequest request;
} QEDCheck;
static bool qed_test_bit(uint32_t *bitmap, uint64_t n) {
    return !!(bitmap[n / 32] & (1 << (n % 32)));
}
static void qed_set_bit(uint32_t *bitmap, uint64_t n) {
    bitmap[n / 32] |= 1 << (n % 32);
}
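/*
 * Bitmap layout illustrated: cluster n is tracked in word n / 32 at bit
 * n % 32.  For example, with a 64 KB cluster size a 1 GB image file spans
 * 16384 clusters, so used_clusters takes 16384 / 32 = 512 uint32_t words
 * (2 KB).
 */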
/**
 * Set bitmap bits for clusters
 *
 * @check:          Check structure
 * @offset:         Starting offset in bytes
 * @n:              Number of clusters
 */
static bool qed_set_used_clusters(QEDCheck *check, uint64_t offset,
                                  unsigned int n)
{
    uint64_t cluster = qed_bytes_to_clusters(check->s, offset);
    unsigned int corruptions = 0;

    while (n-- != 0) {
        /* Clusters should only be referenced once */
        if (qed_test_bit(check->used_clusters, cluster)) {
            corruptions++;
        }

        qed_set_bit(check->used_clusters, cluster);
        cluster++;
    }

    check->result->corruptions += corruptions;
    return corruptions == 0;
}
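/*
 * Return value note: qed_set_used_clusters() returns false if any cluster in
 * the range was already marked, which the L1 scan below uses to skip reading
 * an L2 table whose clusters are already referenced elsewhere in the image.
 */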
/**
 * Check an L2 table
 *
 * @ret:            Number of invalid cluster offsets
 */
static unsigned int qed_check_l2_table(QEDCheck *check, QEDTable *table)
{
    BDRVQEDState *s = check->s;
    unsigned int i, num_invalid = 0;
    uint64_t last_offset = 0;

    for (i = 0; i < s->table_nelems; i++) {
        uint64_t offset = table->offsets[i];

        if (qed_offset_is_unalloc_cluster(offset) ||
            qed_offset_is_zero_cluster(offset)) {
            continue;
        }

        check->result->bfi.allocated_clusters++;
        if (last_offset && (last_offset + s->header.cluster_size != offset)) {
            check->result->bfi.fragmented_clusters++;
        }
        last_offset = offset;

        /* Detect invalid cluster offset */
        if (!qed_check_cluster_offset(s, offset)) {
            if (check->fix) {
                table->offsets[i] = 0;
            } else {
                check->result->corruptions++;
            }

            num_invalid++;
            continue;
        }

        qed_set_used_clusters(check, offset, 1);
    }

    return num_invalid;
}
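/*
 * The bfi (block fragmentation info) counters above feed the allocation and
 * fragmentation summary reported by qemu-img check: a data cluster counts as
 * fragmented when its offset does not immediately follow the previously
 * allocated entry in the same L2 table.
 */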
/**
 * Descend tables and check each cluster is referenced once only
 */
static int qed_check_l1_table(QEDCheck *check, QEDTable *table)
{
    BDRVQEDState *s = check->s;
    unsigned int i, num_invalid_l1 = 0;
    int ret, last_error = 0;

    /* Mark L1 table clusters used */
    qed_set_used_clusters(check, s->header.l1_table_offset,
                          s->header.table_size);

    for (i = 0; i < s->table_nelems; i++) {
        unsigned int num_invalid_l2;
        uint64_t offset = table->offsets[i];

        if (qed_offset_is_unalloc_cluster(offset)) {
            continue;
        }

        /* Detect invalid L2 offset */
        if (!qed_check_table_offset(s, offset)) {
            /* Clear invalid offset */
            if (check->fix) {
                table->offsets[i] = 0;
            } else {
                check->result->corruptions++;
            }

            num_invalid_l1++;
            continue;
        }

        if (!qed_set_used_clusters(check, offset, s->header.table_size)) {
            continue; /* skip an invalid table */
        }

        ret = qed_read_l2_table_sync(s, &check->request, offset);
        if (ret) {
            check->result->check_errors++;
            last_error = ret;
            continue;
        }

        num_invalid_l2 = qed_check_l2_table(check,
                                            check->request.l2_table->table);

        /* Write out fixed L2 table */
        if (num_invalid_l2 > 0 && check->fix) {
            ret = qed_write_l2_table_sync(s, &check->request, 0,
                                          s->table_nelems, false);
            if (ret) {
                check->result->check_errors++;
                last_error = ret;
                continue;
            }
        }
    }

    /* Drop reference to final table */
    qed_unref_l2_cache_entry(check->request.l2_table);
    check->request.l2_table = NULL;

    /* Write out fixed L1 table */
    if (num_invalid_l1 > 0 && check->fix) {
        ret = qed_write_l1_table_sync(s, 0, s->table_nelems);
        if (ret) {
            check->result->check_errors++;
            last_error = ret;
        }
    }

    return last_error;
}
/**
 * Check for unreferenced (leaked) clusters
 */
static void qed_check_for_leaks(QEDCheck *check)
{
    BDRVQEDState *s = check->s;
    uint64_t i;

    for (i = s->header.header_size; i < check->nclusters; i++) {
        if (!qed_test_bit(check->used_clusters, i)) {
            check->result->leaks++;
        }
    }
}
int qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix)
{
    QEDCheck check = {
        .s = s,
        .result = result,
        .nclusters = qed_bytes_to_clusters(s, s->file_size),
        .request = { .l2_table = NULL },
        .fix = fix,
    };
    int ret;

    check.used_clusters = g_malloc0(((check.nclusters + 31) / 32) *
                                    sizeof(check.used_clusters[0]));

    check.result->bfi.total_clusters =
        (s->header.image_size + s->header.cluster_size - 1) /
            s->header.cluster_size;
    ret = qed_check_l1_table(&check, s->l1_table);
    if (ret == 0) {
        /* Only check for leaks if entire image was scanned successfully */
        qed_check_for_leaks(&check);
    }

    g_free(check.used_clusters);

    return ret;
}