btrfs-progs: Fix the argument requirement for '--subvol-extents'
[btrfs-progs-unstable/devel.git] / extent-cache.c
blob7656ab259b8c900dc57ca5d544e2929113d2f915
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include "kerncompat.h"
21 #include "extent-cache.h"
22 #include "rbtree-utils.h"
24 struct cache_extent_search_range {
25 u64 objectid;
26 u64 start;
27 u64 size;
30 static int cache_tree_comp_range(struct rb_node *node, void *data)
32 struct cache_extent *entry;
33 struct cache_extent_search_range *range;
35 range = (struct cache_extent_search_range *)data;
36 entry = rb_entry(node, struct cache_extent, rb_node);
38 if (entry->start + entry->size <= range->start)
39 return 1;
40 else if (range->start + range->size <= entry->start)
41 return -1;
42 else
43 return 0;
46 static int cache_tree_comp_nodes(struct rb_node *node1, struct rb_node *node2)
48 struct cache_extent *entry;
49 struct cache_extent_search_range range;
51 entry = rb_entry(node2, struct cache_extent, rb_node);
52 range.start = entry->start;
53 range.size = entry->size;
55 return cache_tree_comp_range(node1, (void *)&range);
58 static int cache_tree_comp_range2(struct rb_node *node, void *data)
60 struct cache_extent *entry;
61 struct cache_extent_search_range *range;
63 range = (struct cache_extent_search_range *)data;
64 entry = rb_entry(node, struct cache_extent, rb_node);
66 if (entry->objectid < range->objectid)
67 return 1;
68 else if (entry->objectid > range->objectid)
69 return -1;
70 else if (entry->start + entry->size <= range->start)
71 return 1;
72 else if (range->start + range->size <= entry->start)
73 return -1;
74 else
75 return 0;
78 static int cache_tree_comp_nodes2(struct rb_node *node1, struct rb_node *node2)
80 struct cache_extent *entry;
81 struct cache_extent_search_range range;
83 entry = rb_entry(node2, struct cache_extent, rb_node);
84 range.objectid = entry->objectid;
85 range.start = entry->start;
86 range.size = entry->size;
88 return cache_tree_comp_range2(node1, (void *)&range);
91 void cache_tree_init(struct cache_tree *tree)
93 tree->root = RB_ROOT;
96 static struct cache_extent *
97 alloc_cache_extent(u64 objectid, u64 start, u64 size)
99 struct cache_extent *pe = malloc(sizeof(*pe));
101 if (!pe)
102 return pe;
104 pe->objectid = objectid;
105 pe->start = start;
106 pe->size = size;
107 return pe;
110 static int __add_cache_extent(struct cache_tree *tree,
111 u64 objectid, u64 start, u64 size)
113 struct cache_extent *pe = alloc_cache_extent(objectid, start, size);
114 int ret;
116 if (!pe) {
117 fprintf(stderr, "memory allocation failed\n");
118 exit(1);
121 ret = insert_cache_extent(tree, pe);
122 if (ret)
123 free(pe);
125 return ret;
128 int add_cache_extent(struct cache_tree *tree, u64 start, u64 size)
130 return __add_cache_extent(tree, 0, start, size);
133 int add_cache_extent2(struct cache_tree *tree,
134 u64 objectid, u64 start, u64 size)
136 return __add_cache_extent(tree, objectid, start, size);
139 int insert_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
141 return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes);
144 int insert_cache_extent2(struct cache_tree *tree, struct cache_extent *pe)
146 return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes2);
149 struct cache_extent *lookup_cache_extent(struct cache_tree *tree,
150 u64 start, u64 size)
152 struct rb_node *node;
153 struct cache_extent *entry;
154 struct cache_extent_search_range range;
156 range.start = start;
157 range.size = size;
158 node = rb_search(&tree->root, &range, cache_tree_comp_range, NULL);
159 if (!node)
160 return NULL;
162 entry = rb_entry(node, struct cache_extent, rb_node);
163 return entry;
166 struct cache_extent *lookup_cache_extent2(struct cache_tree *tree,
167 u64 objectid, u64 start, u64 size)
169 struct rb_node *node;
170 struct cache_extent *entry;
171 struct cache_extent_search_range range;
173 range.objectid = objectid;
174 range.start = start;
175 range.size = size;
176 node = rb_search(&tree->root, &range, cache_tree_comp_range2, NULL);
177 if (!node)
178 return NULL;
180 entry = rb_entry(node, struct cache_extent, rb_node);
181 return entry;
184 struct cache_extent *search_cache_extent(struct cache_tree *tree, u64 start)
186 struct rb_node *next;
187 struct rb_node *node;
188 struct cache_extent *entry;
189 struct cache_extent_search_range range;
191 range.start = start;
192 range.size = 1;
193 node = rb_search(&tree->root, &range, cache_tree_comp_range, &next);
194 if (!node)
195 node = next;
196 if (!node)
197 return NULL;
199 entry = rb_entry(node, struct cache_extent, rb_node);
200 return entry;
203 struct cache_extent *search_cache_extent2(struct cache_tree *tree,
204 u64 objectid, u64 start)
206 struct rb_node *next;
207 struct rb_node *node;
208 struct cache_extent *entry;
209 struct cache_extent_search_range range;
211 range.objectid = objectid;
212 range.start = start;
213 range.size = 1;
214 node = rb_search(&tree->root, &range, cache_tree_comp_range2, &next);
215 if (!node)
216 node = next;
217 if (!node)
218 return NULL;
220 entry = rb_entry(node, struct cache_extent, rb_node);
221 return entry;
224 struct cache_extent *first_cache_extent(struct cache_tree *tree)
226 struct rb_node *node = rb_first(&tree->root);
228 if (!node)
229 return NULL;
230 return rb_entry(node, struct cache_extent, rb_node);
233 struct cache_extent *prev_cache_extent(struct cache_extent *pe)
235 struct rb_node *node = rb_prev(&pe->rb_node);
237 if (!node)
238 return NULL;
239 return rb_entry(node, struct cache_extent, rb_node);
242 struct cache_extent *next_cache_extent(struct cache_extent *pe)
244 struct rb_node *node = rb_next(&pe->rb_node);
246 if (!node)
247 return NULL;
248 return rb_entry(node, struct cache_extent, rb_node);
251 void remove_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
253 rb_erase(&pe->rb_node, &tree->root);
256 void cache_tree_free_extents(struct cache_tree *tree,
257 free_cache_extent free_func)
259 struct cache_extent *ce;
261 while ((ce = first_cache_extent(tree))) {
262 remove_cache_extent(tree, ce);
263 free_func(ce);
/* Default destructor for extents allocated with alloc_cache_extent(). */
static void free_extent_cache(struct cache_extent *pe)
{
	free(pe);
}
272 void free_extent_cache_tree(struct cache_tree *tree)
274 cache_tree_free_extents(tree, free_extent_cache);