block/qed-cluster.c
/*
 * QEMU Enhanced Disk Format Cluster functions
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qed.h"

/**
 * Count the number of contiguous data clusters
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Maximum number of clusters
 * @offset:     Set to first cluster offset
 *
 * This function scans tables for contiguous clusters.  A contiguous run of
 * clusters may be allocated, unallocated, or zero.
 */
static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
                                                  QEDTable *table,
                                                  unsigned int index,
                                                  unsigned int n,
                                                  uint64_t *offset)
{
    unsigned int end = MIN(index + n, s->table_nelems);
    uint64_t last = table->offsets[index];
    unsigned int i;

    *offset = last;

    for (i = index + 1; i < end; i++) {
        if (qed_offset_is_unalloc_cluster(last)) {
            /* Counting unallocated clusters */
            if (!qed_offset_is_unalloc_cluster(table->offsets[i])) {
                break;
            }
        } else if (qed_offset_is_zero_cluster(last)) {
            /* Counting zero clusters */
            if (!qed_offset_is_zero_cluster(table->offsets[i])) {
                break;
            }
        } else {
            /* Counting allocated clusters */
            if (table->offsets[i] != last + s->header.cluster_size) {
                break;
            }
            last = table->offsets[i];
        }
    }

    return i - index;
}
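
/*
 * Editor's note (not part of the original source): a worked example of the
 * scan above, assuming a hypothetical 64 KiB cluster_size.  If the L2 table
 * entries starting at index 0 are { 0x100000, 0x110000, 0x120000, 0x300000 },
 * a call with index = 0 and n = 4 counts allocated clusters while each entry
 * equals the previous one plus 0x10000; the run stops at 0x300000 (since
 * 0x120000 + 0x10000 == 0x130000 != 0x300000), so the function returns 3 and
 * sets *offset to 0x100000.
 */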

typedef struct {
    BDRVQEDState *s;
    uint64_t pos;
    size_t len;

    QEDRequest *request;

    /* User callback */
    QEDFindClusterFunc *cb;
    void *opaque;
} QEDFindClusterCB;

static void qed_find_cluster_cb(void *opaque, int ret)
{
    QEDFindClusterCB *find_cluster_cb = opaque;
    BDRVQEDState *s = find_cluster_cb->s;
    QEDRequest *request = find_cluster_cb->request;
    uint64_t offset = 0;
    size_t len = 0;
    unsigned int index;
    unsigned int n;

    qed_acquire(s);
    if (ret) {
        goto out;
    }

    index = qed_l2_index(s, find_cluster_cb->pos);
    n = qed_bytes_to_clusters(s,
                              qed_offset_into_cluster(s, find_cluster_cb->pos) +
                              find_cluster_cb->len);
    n = qed_count_contiguous_clusters(s, request->l2_table->table,
                                      index, n, &offset);

    if (qed_offset_is_unalloc_cluster(offset)) {
        ret = QED_CLUSTER_L2;
    } else if (qed_offset_is_zero_cluster(offset)) {
        ret = QED_CLUSTER_ZERO;
    } else if (qed_check_cluster_offset(s, offset)) {
        ret = QED_CLUSTER_FOUND;
    } else {
        ret = -EINVAL;
    }

    len = MIN(find_cluster_cb->len, n * s->header.cluster_size -
              qed_offset_into_cluster(s, find_cluster_cb->pos));

out:
    find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
    qed_release(s);
    g_free(find_cluster_cb);
}
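
/*
 * Editor's note (not part of the original source): the reported len above is
 * the contiguous byte count starting at the requested position, capped by the
 * caller's length.  For illustration, assuming a hypothetical 64 KiB
 * cluster_size, a request whose offset into its first cluster is 0x1000, with
 * n == 3 contiguous clusters and a requested length of 1 MiB, reports
 * MIN(0x100000, 3 * 0x10000 - 0x1000) == 0x2f000 bytes.
 */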

/**
 * Find the offset of a data cluster
 *
 * @s:          QED state
 * @request:    L2 cache entry
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function translates a position in the block device to an offset in the
 * image file.  It invokes the cb completion callback to report back the
 * translated offset or unallocated range in the image file.
 *
 * If the L2 table exists, request->l2_table points to the L2 table cache entry
 * and the caller must free the reference when they are finished.  The cache
 * entry is exposed in this way to avoid callers having to read the L2 table
 * again later during request processing.  If request->l2_table is non-NULL it
 * will be unreferenced before taking on the new cache entry.
 */
void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
                      size_t len, QEDFindClusterFunc *cb, void *opaque)
{
    QEDFindClusterCB *find_cluster_cb;
    uint64_t l2_offset;

    /* Limit length to L2 boundary.  Requests are broken up at the L2 boundary
     * so that a request acts on one L2 table at a time.
     */
    len = MIN(len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);
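
    /*
     * Editor's note (not part of the original source): assuming, for
     * illustration only, l1_shift == 28 (each L2 table covering 256 MiB), a
     * request at pos == 0x0fff0000 is clamped to at most
     * (((0x0fff0000 >> 28) + 1) << 28) - 0x0fff0000 == 0x10000 bytes, so it
     * never crosses the 0x10000000 boundary into the next L2 table.
     */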

    l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
    if (qed_offset_is_unalloc_cluster(l2_offset)) {
        cb(opaque, QED_CLUSTER_L1, 0, len);
        return;
    }
    if (!qed_check_table_offset(s, l2_offset)) {
        cb(opaque, -EINVAL, 0, 0);
        return;
    }

    find_cluster_cb = g_malloc(sizeof(*find_cluster_cb));
    find_cluster_cb->s = s;
    find_cluster_cb->pos = pos;
    find_cluster_cb->len = len;
    find_cluster_cb->cb = cb;
    find_cluster_cb->opaque = opaque;
    find_cluster_cb->request = request;

    qed_read_l2_table(s, request, l2_offset,
                      qed_find_cluster_cb, find_cluster_cb);
}
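
#if 0
/*
 * Editor's sketch (not part of the original source): a minimal illustration
 * of how a caller might use qed_find_cluster(), kept inside #if 0 so it is
 * never compiled.  The callback signature matches the invocation in
 * qed_find_cluster_cb() above, and the interpretation of the return codes
 * follows the classification there; the names example_find_done() and
 * example_translate() are invented for illustration only.
 */
static void example_find_done(void *opaque, int ret, uint64_t offset,
                              size_t len)
{
    if (ret == QED_CLUSTER_FOUND) {
        /* offset is the image file offset of the first data cluster; len
         * bytes are contiguous from there */
    } else if (ret == QED_CLUSTER_ZERO) {
        /* len bytes read back as zeroes */
    } else if (ret == QED_CLUSTER_L2 || ret == QED_CLUSTER_L1) {
        /* len bytes are unallocated (missing cluster or missing L2 table) */
    } else {
        /* ret is a negative errno value, e.g. -EINVAL */
    }
}

static void example_translate(BDRVQEDState *s, QEDRequest *request,
                              uint64_t pos, size_t len)
{
    /* request->l2_table must eventually be released by the caller, as
     * described in the comment above qed_find_cluster() */
    qed_find_cluster(s, request, pos, len, example_find_done, NULL);
}
#endif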