btrfs-progs: replace leafsize with nodesize
btrfs-progs-unstable/devel.git / extent-cache.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "extent-cache.h"
#include "rbtree-utils.h"

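/*
 * Key used by the rb-tree comparators below: a lookup matches any
 * cache_extent that overlaps [start, start + size) (and, for the "2"
 * variants, has the same objectid).
 */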
struct cache_extent_search_range {
	u64 objectid;
	u64 start;
	u64 size;
};

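/*
 * Compare an existing cache_extent against a search range; overlapping
 * ranges compare as equal, so a search finds any intersecting extent.
 */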
static int cache_tree_comp_range(struct rb_node *node, void *data)
{
	struct cache_extent *entry;
	struct cache_extent_search_range *range;

	range = (struct cache_extent_search_range *)data;
	entry = rb_entry(node, struct cache_extent, rb_node);

	if (entry->start + entry->size <= range->start)
		return 1;
	else if (range->start + range->size <= entry->start)
		return -1;
	else
		return 0;
}

static int cache_tree_comp_nodes(struct rb_node *node1, struct rb_node *node2)
{
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	entry = rb_entry(node2, struct cache_extent, rb_node);
	range.start = entry->start;
	range.size = entry->size;

	return cache_tree_comp_range(node1, (void *)&range);
}

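/*
 * Variants of the comparators above that order by objectid first and only
 * then by byte range, so one tree can hold extents of several objects.
 */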
static int cache_tree_comp_range2(struct rb_node *node, void *data)
{
	struct cache_extent *entry;
	struct cache_extent_search_range *range;

	range = (struct cache_extent_search_range *)data;
	entry = rb_entry(node, struct cache_extent, rb_node);

	if (entry->objectid < range->objectid)
		return 1;
	else if (entry->objectid > range->objectid)
		return -1;
	else if (entry->start + entry->size <= range->start)
		return 1;
	else if (range->start + range->size <= entry->start)
		return -1;
	else
		return 0;
}

static int cache_tree_comp_nodes2(struct rb_node *node1, struct rb_node *node2)
{
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	entry = rb_entry(node2, struct cache_extent, rb_node);
	range.objectid = entry->objectid;
	range.start = entry->start;
	range.size = entry->size;

	return cache_tree_comp_range2(node1, (void *)&range);
}

void cache_tree_init(struct cache_tree *tree)
{
	tree->root = RB_ROOT;
}

static struct cache_extent *
alloc_cache_extent(u64 objectid, u64 start, u64 size)
{
	struct cache_extent *pe = malloc(sizeof(*pe));

	if (!pe)
		return pe;

	pe->objectid = objectid;
	pe->start = start;
	pe->size = size;
	return pe;
}

static int __add_cache_extent(struct cache_tree *tree,
			      u64 objectid, u64 start, u64 size)
{
	struct cache_extent *pe = alloc_cache_extent(objectid, start, size);
	int ret;

	if (!pe) {
		fprintf(stderr, "memory allocation failed\n");
		exit(1);
	}

	ret = insert_cache_extent(tree, pe);
	if (ret)
		free(pe);

	return ret;
}

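/*
 * add_cache_extent()/add_cache_extent2() allocate a new cache_extent for
 * [start, start + size) and insert it; allocation failure is fatal, and the
 * extent is freed again if the insert fails.  insert_cache_extent() and
 * insert_cache_extent2() insert a caller-allocated cache_extent directly.
 */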
int add_cache_extent(struct cache_tree *tree, u64 start, u64 size)
{
	return __add_cache_extent(tree, 0, start, size);
}

int add_cache_extent2(struct cache_tree *tree,
		      u64 objectid, u64 start, u64 size)
{
	return __add_cache_extent(tree, objectid, start, size);
}

int insert_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes);
}

int insert_cache_extent2(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes2);
}

struct cache_extent *lookup_cache_extent(struct cache_tree *tree,
					 u64 start, u64 size)
{
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.start = start;
	range.size = size;
	node = rb_search(&tree->root, &range, cache_tree_comp_range, NULL);
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

struct cache_extent *lookup_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start, u64 size)
{
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.objectid = objectid;
	range.start = start;
	range.size = size;
	node = rb_search(&tree->root, &range, cache_tree_comp_range2, NULL);
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

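/*
 * Search variants keyed by a single offset: return the cache_extent covering
 * @start, or, if none covers it, the next cache_extent in the tree (NULL when
 * there is no such extent).
 */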
struct cache_extent *search_cache_extent(struct cache_tree *tree, u64 start)
{
	struct rb_node *next;
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.start = start;
	range.size = 1;
	node = rb_search(&tree->root, &range, cache_tree_comp_range, &next);
	if (!node)
		node = next;
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

struct cache_extent *search_cache_extent2(struct cache_tree *tree,
					  u64 objectid, u64 start)
{
	struct rb_node *next;
	struct rb_node *node;
	struct cache_extent *entry;
	struct cache_extent_search_range range;

	range.objectid = objectid;
	range.start = start;
	range.size = 1;
	node = rb_search(&tree->root, &range, cache_tree_comp_range2, &next);
	if (!node)
		node = next;
	if (!node)
		return NULL;

	entry = rb_entry(node, struct cache_extent, rb_node);
	return entry;
}

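/* Iteration helpers: walk the cache extents in tree order. */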
struct cache_extent *first_cache_extent(struct cache_tree *tree)
{
	struct rb_node *node = rb_first(&tree->root);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *last_cache_extent(struct cache_tree *tree)
{
	struct rb_node *node = rb_last(&tree->root);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *prev_cache_extent(struct cache_extent *pe)
{
	struct rb_node *node = rb_prev(&pe->rb_node);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

struct cache_extent *next_cache_extent(struct cache_extent *pe)
{
	struct rb_node *node = rb_next(&pe->rb_node);

	if (!node)
		return NULL;
	return rb_entry(node, struct cache_extent, rb_node);
}

void remove_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	rb_erase(&pe->rb_node, &tree->root);
}

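/* Empty the tree, handing each removed extent to @free_func. */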
void cache_tree_free_extents(struct cache_tree *tree,
			     free_cache_extent free_func)
{
	struct cache_extent *ce;

	while ((ce = first_cache_extent(tree))) {
		remove_cache_extent(tree, ce);
		free_func(ce);
	}
}

static void free_extent_cache(struct cache_extent *pe)
{
	free(pe);
}

void free_extent_cache_tree(struct cache_tree *tree)
{
	cache_tree_free_extents(tree, free_extent_cache);
}
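
/*
 * Insert [start, start + size) and merge it with neighbouring extents that
 * touch it exactly.  The new range must not overlap an existing extent;
 * overlaps trip the BUG_ON() checks below.
 */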
int add_merge_cache_extent(struct cache_tree *tree, u64 start, u64 size)
{
	struct cache_extent *cache;
	struct cache_extent *next = NULL;
	struct cache_extent *prev = NULL;
	int next_merged = 0;
	int prev_merged = 0;
	int ret = 0;

	if (cache_tree_empty(tree))
		goto insert;

	cache = search_cache_extent(tree, start);
	if (!cache) {
		/*
		 * Either the tree is completely empty, or there is no range
		 * after start.
		 * Either way, the last cache_extent should be prev.
		 */
		prev = last_cache_extent(tree);
	} else if (start <= cache->start) {
		next = cache;
		prev = prev_cache_extent(cache);
	} else {
		prev = cache;
		next = next_cache_extent(cache);
	}

	/*
	 * Ensure the range to be inserted doesn't overlap with existing
	 * ranges, or we would need an extra loop to do the merge.
	 */
	BUG_ON(next && start + size > next->start);
	BUG_ON(prev && prev->start + prev->size > start);

	if (next && start + size == next->start) {
		next_merged = 1;
		next->size = next->start + next->size - start;
		next->start = start;
	}
	if (prev && prev->start + prev->size == start) {
		prev_merged = 1;
		if (next_merged) {
			next->size = next->start + next->size - prev->start;
			next->start = prev->start;
			remove_cache_extent(tree, prev);
			free(prev);
		} else {
			prev->size = start + size - prev->start;
		}
	}
insert:
	if (!prev_merged && !next_merged)
		ret = add_cache_extent(tree, start, size);
	return ret;
}