/*
 * QEMU Enhanced Disk Format Cluster functions
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
19 * Count the number of contiguous data clusters
23 * @index: First cluster index
24 * @n: Maximum number of clusters
25 * @offset: Set to first cluster offset
27 * This function scans tables for contiguous clusters. A contiguous run of
28 * clusters may be allocated, unallocated, or zero.
30 static unsigned int qed_count_contiguous_clusters(BDRVQEDState
*s
,
36 unsigned int end
= MIN(index
+ n
, s
->table_nelems
);
37 uint64_t last
= table
->offsets
[index
];
42 for (i
= index
+ 1; i
< end
; i
++) {
43 if (qed_offset_is_unalloc_cluster(last
)) {
44 /* Counting unallocated clusters */
45 if (!qed_offset_is_unalloc_cluster(table
->offsets
[i
])) {
48 } else if (qed_offset_is_zero_cluster(last
)) {
49 /* Counting zero clusters */
50 if (!qed_offset_is_zero_cluster(table
->offsets
[i
])) {
54 /* Counting allocated clusters */
55 if (table
->offsets
[i
] != last
+ s
->header
.cluster_size
) {
58 last
= table
->offsets
[i
];
65 * Find the offset of a data cluster
68 * @request: L2 cache entry
69 * @pos: Byte position in device
70 * @len: Number of bytes (may be shortened on return)
71 * @img_offset: Contains offset in the image file on success
73 * This function translates a position in the block device to an offset in the
74 * image file. The translated offset or unallocated range in the image file is
75 * reported back in *img_offset and *len.
77 * If the L2 table exists, request->l2_table points to the L2 table cache entry
78 * and the caller must free the reference when they are finished. The cache
79 * entry is exposed in this way to avoid callers having to read the L2 table
80 * again later during request processing. If request->l2_table is non-NULL it
81 * will be unreferenced before taking on the new cache entry.
83 * On success QED_CLUSTER_FOUND is returned and img_offset/len are a contiguous
84 * range in the image file.
86 * On failure QED_CLUSTER_L2 or QED_CLUSTER_L1 is returned for missing L2 or L1
87 * table offset, respectively. len is number of contiguous unallocated bytes.
89 * Called with table_lock held.
91 int coroutine_fn
qed_find_cluster(BDRVQEDState
*s
, QEDRequest
*request
,
92 uint64_t pos
, size_t *len
,
101 /* Limit length to L2 boundary. Requests are broken up at the L2 boundary
102 * so that a request acts on one L2 table at a time.
104 *len
= MIN(*len
, (((pos
>> s
->l1_shift
) + 1) << s
->l1_shift
) - pos
);
106 l2_offset
= s
->l1_table
->offsets
[qed_l1_index(s
, pos
)];
107 if (qed_offset_is_unalloc_cluster(l2_offset
)) {
109 return QED_CLUSTER_L1
;
111 if (!qed_check_table_offset(s
, l2_offset
)) {
112 *img_offset
= *len
= 0;
116 ret
= qed_read_l2_table(s
, request
, l2_offset
);
121 index
= qed_l2_index(s
, pos
);
122 n
= qed_bytes_to_clusters(s
, qed_offset_into_cluster(s
, pos
) + *len
);
123 n
= qed_count_contiguous_clusters(s
, request
->l2_table
->table
,
126 if (qed_offset_is_unalloc_cluster(offset
)) {
127 ret
= QED_CLUSTER_L2
;
128 } else if (qed_offset_is_zero_cluster(offset
)) {
129 ret
= QED_CLUSTER_ZERO
;
130 } else if (qed_check_cluster_offset(s
, offset
)) {
131 ret
= QED_CLUSTER_FOUND
;
137 n
* s
->header
.cluster_size
- qed_offset_into_cluster(s
, pos
));
140 *img_offset
= offset
;