Btrfs progs v4.17.1
extent-cache.c (btrfs-progs-unstable/devel.git)

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "extent-cache.h"
#include "rbtree-utils.h"

struct cache_extent_search_range {
	u64 objectid;
	u64 start;
	u64 size;
};

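/*
 * Range comparator used with rb_search()/rb_insert(): returns 0 when the
 * node's [start, start + size) range overlaps the searched range, 1 when
 * the node lies entirely before it, and -1 when it lies entirely after it.
 */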
static int cache_tree_comp_range(struct rb_node *node, void *data)
{
	struct cache_extent *entry;
	struct cache_extent_search_range *range;

	range = (struct cache_extent_search_range *)data;
	entry = rb_entry(node, struct cache_extent, rb_node);

	if (entry->start + entry->size <= range->start)
		return 1;
	else if (range->start + range->size <= entry->start)
		return -1;
	else
		return 0;
}

static int cache_tree_comp_nodes(struct rb_node *node1, struct rb_node *node2)
{
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	entry = rb_entry(node2, struct cache_extent, rb_node);
	range.start = entry->start;
	range.size = entry->size;

	return cache_tree_comp_range(node1, (void *)&range);
}

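/*
 * Same as cache_tree_comp_range() but ordered by objectid first; ranges
 * are only compared for overlap when the objectids are equal.
 */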
static int cache_tree_comp_range2(struct rb_node *node, void *data)
{
	struct cache_extent *entry;
	struct cache_extent_search_range *range;

	range = (struct cache_extent_search_range *)data;
	entry = rb_entry(node, struct cache_extent, rb_node);

	if (entry->objectid < range->objectid)
		return 1;
	else if (entry->objectid > range->objectid)
		return -1;
	else if (entry->start + entry->size <= range->start)
		return 1;
	else if (range->start + range->size <= entry->start)
		return -1;
	else
		return 0;
}

static int cache_tree_comp_nodes2(struct rb_node *node1, struct rb_node *node2)
{
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	entry = rb_entry(node2, struct cache_extent, rb_node);
	range.objectid = entry->objectid;
	range.start = entry->start;
	range.size = entry->size;

	return cache_tree_comp_range2(node1, (void *)&range);
}

void cache_tree_init(struct cache_tree *tree)
{
	tree->root = RB_ROOT;
}

static struct cache_extent *alloc_cache_extent(u64 start, u64 size)
{
	struct cache_extent *pe = malloc(sizeof(*pe));

	if (!pe)
		return pe;

	pe->objectid = 0;
	pe->start = start;
	pe->size = size;
	return pe;
}

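/*
 * Allocate a new cache_extent for [start, start + size) and insert it.
 * Exits on allocation failure; if the insert itself fails (e.g. an
 * overlapping extent is already cached), the extent is freed and the
 * error is returned.
 */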
int add_cache_extent(struct cache_tree *tree, u64 start, u64 size)
{
	struct cache_extent *pe = alloc_cache_extent(start, size);
	int ret;

	if (!pe) {
		fprintf(stderr, "memory allocation failed\n");
		exit(1);
	}

	ret = insert_cache_extent(tree, pe);
	if (ret)
		free(pe);

	return ret;
}

int insert_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes);
}

int insert_cache_extent2(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes2);
}

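/*
 * Return the cache_extent whose range overlaps [start, start + size),
 * or NULL if no cached range overlaps it.
 */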
struct cache_extent *lookup_cache_extent(struct cache_tree *tree,
					 u64 start, u64 size)
{
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.start = start;
	range.size = size;
	node = rb_search(&tree->root, &range, cache_tree_comp_range, NULL);
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

struct cache_extent *lookup_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start, u64 size)
{
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.objectid = objectid;
	range.start = start;
	range.size = size;
	node = rb_search(&tree->root, &range, cache_tree_comp_range2, NULL);
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

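/*
 * Return the cache_extent that covers @start or, if none does, the first
 * cache_extent that starts after @start; NULL if there is no such extent.
 */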
struct cache_extent *search_cache_extent(struct cache_tree *tree, u64 start)
{
	struct rb_node *next;
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.start = start;
	range.size = 1;
	node = rb_search(&tree->root, &range, cache_tree_comp_range, &next);
	if (!node)
		node = next;
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

struct cache_extent *search_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start)
{
	struct rb_node *next;
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.objectid = objectid;
	range.start = start;
	range.size = 1;
	node = rb_search(&tree->root, &range, cache_tree_comp_range2, &next);
	if (!node)
		node = next;
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

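/*
 * Iteration helpers: thin wrappers around the rbtree traversal primitives
 * that return NULL when the tree (or the walk) runs out of entries.
 */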
struct cache_extent *first_cache_extent(struct cache_tree *tree)
{
	struct rb_node *node = rb_first(&tree->root);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *last_cache_extent(struct cache_tree *tree)
{
	struct rb_node *node = rb_last(&tree->root);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *prev_cache_extent(struct cache_extent *pe)
{
	struct rb_node *node = rb_prev(&pe->rb_node);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *next_cache_extent(struct cache_extent *pe)
{
	struct rb_node *node = rb_next(&pe->rb_node);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

void remove_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	rb_erase(&pe->rb_node, &tree->root);
}

void cache_tree_free_extents(struct cache_tree *tree,
			     free_cache_extent free_func)
{
	struct cache_extent *ce;

	while ((ce = first_cache_extent(tree))) {
		remove_cache_extent(tree, ce);
		free_func(ce);
	}
}

static void free_extent_cache(struct cache_extent *pe)
{
	free(pe);
}

void free_extent_cache_tree(struct cache_tree *tree)
{
	cache_tree_free_extents(tree, free_extent_cache);
}

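/*
 * Insert the range [start, start + size) and merge it with any directly
 * adjacent cached range on either side; the new range must not overlap
 * an existing extent.
 */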
int add_merge_cache_extent(struct cache_tree *tree, u64 start, u64 size)
{
	struct cache_extent *cache;
	struct cache_extent *next = NULL;
	struct cache_extent *prev = NULL;
	int next_merged = 0;
	int prev_merged = 0;
	int ret = 0;

	if (cache_tree_empty(tree))
		goto insert;

	cache = search_cache_extent(tree, start);
	if (!cache) {
		/*
		 * Either the tree is completely empty, or there is no range
		 * after start.
		 * Either way, the last cache_extent should be prev.
		 */
		prev = last_cache_extent(tree);
	} else if (start <= cache->start) {
		next = cache;
		prev = prev_cache_extent(cache);
	} else {
		prev = cache;
		next = next_cache_extent(cache);
	}

	/*
	 * Ensure the range to be inserted doesn't overlap any existing
	 * range, or we would need an extra loop to do the merge.
	 */
	BUG_ON(next && start + size > next->start);
	BUG_ON(prev && prev->start + prev->size > start);

	if (next && start + size == next->start) {
		next_merged = 1;
		next->size = next->start + next->size - start;
		next->start = start;
	}
	if (prev && prev->start + prev->size == start) {
		prev_merged = 1;
		if (next_merged) {
			next->size = next->start + next->size - prev->start;
			next->start = prev->start;
			remove_cache_extent(tree, prev);
			free(prev);
		} else {
			prev->size = start + size - prev->start;
		}
	}
insert:
	if (!prev_merged && !next_merged)
		ret = add_cache_extent(tree, start, size);
	return ret;
}