/*
 * QEMU Enhanced Disk Format Cluster functions
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qed.h"

/**
 * Count the number of contiguous data clusters
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Maximum number of clusters
 * @offset:         Set to first cluster offset
 *
 * This function scans tables for contiguous clusters. A contiguous run of
 * clusters may be allocated, unallocated, or zero.
 */
static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
                                                  QEDTable *table,
                                                  unsigned int index,
                                                  unsigned int n,
                                                  uint64_t *offset)
{
    unsigned int end = MIN(index + n, s->table_nelems);
    uint64_t last = table->offsets[index];
    unsigned int i;

    *offset = last;

    for (i = index + 1; i < end; i++) {
        if (qed_offset_is_unalloc_cluster(last)) {
            /* Counting unallocated clusters */
            if (!qed_offset_is_unalloc_cluster(table->offsets[i])) {
                break;
            }
        } else if (qed_offset_is_zero_cluster(last)) {
            /* Counting zero clusters */
            if (!qed_offset_is_zero_cluster(table->offsets[i])) {
                break;
            }
        } else {
            /* Counting allocated clusters */
            if (table->offsets[i] != last + s->header.cluster_size) {
                break;
            }
            last = table->offsets[i];
        }
    }
    return i - index;
}
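
/*
 * Context for an in-flight qed_find_cluster() lookup.  The structure is
 * heap-allocated by qed_find_cluster() and released in qed_find_cluster_cb()
 * once the user callback has been invoked.
 */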
typedef struct {
    BDRVQEDState *s;
    uint64_t pos;
    size_t len;

    QEDRequest *request;

    /* User callback */
    QEDFindClusterFunc *cb;
    void *opaque;
} QEDFindClusterCB;
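
/*
 * Completion callback for qed_read_l2_table().  On success the loaded L2
 * table is scanned for a contiguous run of clusters starting at the request
 * position, the run is classified, and the result is reported to the user
 * callback.  On error the failure code is passed straight through.
 */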
static void qed_find_cluster_cb(void *opaque, int ret)
{
    QEDFindClusterCB *find_cluster_cb = opaque;
    BDRVQEDState *s = find_cluster_cb->s;
    QEDRequest *request = find_cluster_cb->request;
    uint64_t offset = 0;
    size_t len = 0;
    unsigned int index;
    unsigned int n;

    if (ret) {
        goto out;
    }

    index = qed_l2_index(s, find_cluster_cb->pos);
    n = qed_bytes_to_clusters(s,
                              qed_offset_into_cluster(s, find_cluster_cb->pos) +
                              find_cluster_cb->len);
    n = qed_count_contiguous_clusters(s, request->l2_table->table,
                                      index, n, &offset);
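
    /*
     * Classify the run by its first cluster offset: an unallocated offset
     * means the cluster is missing from this L2 table (QED_CLUSTER_L2), a
     * zero marker means the range reads back as zeroes (QED_CLUSTER_ZERO),
     * a valid cluster offset is a normal allocated range
     * (QED_CLUSTER_FOUND), and any other value fails
     * qed_check_cluster_offset() and is reported as -EINVAL.
     */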
    if (qed_offset_is_unalloc_cluster(offset)) {
        ret = QED_CLUSTER_L2;
    } else if (qed_offset_is_zero_cluster(offset)) {
        ret = QED_CLUSTER_ZERO;
    } else if (qed_check_cluster_offset(s, offset)) {
        ret = QED_CLUSTER_FOUND;
    } else {
        ret = -EINVAL;
    }

    len = MIN(find_cluster_cb->len, n * s->header.cluster_size -
              qed_offset_into_cluster(s, find_cluster_cb->pos));

out:
    find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
    g_free(find_cluster_cb);
}

/**
 * Find the offset of a data cluster
 *
 * @s:          QED state
 * @request:    L2 cache entry
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function translates a position in the block device to an offset in the
 * image file. It invokes the cb completion callback to report back the
 * translated offset or unallocated range in the image file.
 *
 * If the L2 table exists, request->l2_table points to the L2 table cache entry
 * and the caller must free the reference when they are finished. The cache
 * entry is exposed in this way to avoid callers having to read the L2 table
 * again later during request processing. If request->l2_table is non-NULL it
 * will be unreferenced before taking on the new cache entry.
 */
void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
                      size_t len, QEDFindClusterFunc *cb, void *opaque)
{
    QEDFindClusterCB *find_cluster_cb;
    uint64_t l2_offset;

    /* Limit length to L2 boundary. Requests are broken up at the L2 boundary
     * so that a request acts on one L2 table at a time.
     */
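    /*
     * (((pos >> s->l1_shift) + 1) << s->l1_shift) rounds pos up to the start
     * of the next L2 table, so subtracting pos gives the number of bytes
     * left in the table that covers pos.
     */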
    len = MIN(len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);

    l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
    if (qed_offset_is_unalloc_cluster(l2_offset)) {
        cb(opaque, QED_CLUSTER_L1, 0, len);
        return;
    }
    if (!qed_check_table_offset(s, l2_offset)) {
        cb(opaque, -EINVAL, 0, 0);
        return;
    }
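
    /*
     * qed_read_l2_table() reports its result through a callback and may
     * complete after qed_find_cluster() returns, so the lookup state is
     * heap-allocated rather than kept on the stack; it is freed in
     * qed_find_cluster_cb().
     */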
    find_cluster_cb = g_malloc(sizeof(*find_cluster_cb));
    find_cluster_cb->s = s;
    find_cluster_cb->pos = pos;
    find_cluster_cb->len = len;
    find_cluster_cb->cb = cb;
    find_cluster_cb->opaque = opaque;
    find_cluster_cb->request = request;

    qed_read_l2_table(s, request, l2_offset,
                      qed_find_cluster_cb, find_cluster_cb);
}
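
/*
 * Illustrative usage (editor's sketch, not part of the original file).  The
 * helper names below are hypothetical; only qed_find_cluster() and the
 * QED_CLUSTER_* codes come from this file and qed.h, and the callback
 * signature follows the invocations above.  The sketch is kept out of the
 * build with #if 0.
 */
#if 0
static void example_find_cb(void *opaque, int ret, uint64_t offset,
                            size_t len)
{
    if (ret == QED_CLUSTER_FOUND) {
        /* offset/len describe an allocated byte range in the image file */
    } else if (ret == QED_CLUSTER_ZERO) {
        /* the range reads back as zeroes */
    } else if (ret == QED_CLUSTER_L1 || ret == QED_CLUSTER_L2) {
        /* the range is unallocated in this image */
    } else {
        /* ret is a negative errno value */
    }
}

static void example_lookup(BDRVQEDState *s, QEDRequest *request,
                           uint64_t pos, size_t len)
{
    qed_find_cluster(s, request, pos, len, example_find_cb, NULL);
}
#endif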