Btrfs-progs: add option to skip whether a scrub has started/resumed in userspace
[btrfs-progs-unstable/devel.git] / extent-cache.c
blob84de87b9b485f2482d2168bf4aa4c26e42e20a8d
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include "kerncompat.h"
21 #include "extent-cache.h"
/*
 * Search key used by the rb_search comparators below: matches any cached
 * extent overlapping [start, start + size).  The *2 comparators additionally
 * require an equal objectid; the plain comparators ignore that field.
 */
struct cache_extent_search_range {
	u64 objectid;
	u64 start;
	u64 size;
};
29 static int cache_tree_comp_range(struct rb_node *node, void *data)
31 struct cache_extent *entry;
32 struct cache_extent_search_range *range;
34 range = (struct cache_extent_search_range *)data;
35 entry = rb_entry(node, struct cache_extent, rb_node);
37 if (entry->start + entry->size <= range->start)
38 return 1;
39 else if (range->start + range->size <= entry->start)
40 return -1;
41 else
42 return 0;
45 static int cache_tree_comp_nodes(struct rb_node *node1, struct rb_node *node2)
47 struct cache_extent *entry;
48 struct cache_extent_search_range range;
50 entry = rb_entry(node2, struct cache_extent, rb_node);
51 range.start = entry->start;
52 range.size = entry->size;
54 return cache_tree_comp_range(node1, (void *)&range);
57 static int cache_tree_comp_range2(struct rb_node *node, void *data)
59 struct cache_extent *entry;
60 struct cache_extent_search_range *range;
62 range = (struct cache_extent_search_range *)data;
63 entry = rb_entry(node, struct cache_extent, rb_node);
65 if (entry->objectid < range->objectid)
66 return 1;
67 else if (entry->objectid > range->objectid)
68 return -1;
69 else if (entry->start + entry->size <= range->start)
70 return 1;
71 else if (range->start + range->size <= entry->start)
72 return -1;
73 else
74 return 0;
77 static int cache_tree_comp_nodes2(struct rb_node *node1, struct rb_node *node2)
79 struct cache_extent *entry;
80 struct cache_extent_search_range range;
82 entry = rb_entry(node2, struct cache_extent, rb_node);
83 range.objectid = entry->objectid;
84 range.start = entry->start;
85 range.size = entry->size;
87 return cache_tree_comp_range2(node1, (void *)&range);
/* Initialize a cache tree to the empty state. */
void cache_tree_init(struct cache_tree *tree)
{
	tree->root = RB_ROOT;
}
95 static struct cache_extent *
96 alloc_cache_extent(u64 objectid, u64 start, u64 size)
98 struct cache_extent *pe = malloc(sizeof(*pe));
100 if (!pe)
101 return pe;
103 pe->objectid = objectid;
104 pe->start = start;
105 pe->size = size;
106 return pe;
109 static int __add_cache_extent(struct cache_tree *tree,
110 u64 objectid, u64 start, u64 size)
112 struct cache_extent *pe = alloc_cache_extent(objectid, start, size);
113 int ret;
115 if (!pe) {
116 fprintf(stderr, "memory allocation failed\n");
117 exit(1);
120 ret = insert_cache_extent(tree, pe);
121 if (ret)
122 free(pe);
124 return ret;
/*
 * Add a [start, start + size) extent keyed only by range (objectid 0).
 * Returns 0 on success, non-zero if an overlapping extent already exists.
 */
int add_cache_extent(struct cache_tree *tree, u64 start, u64 size)
{
	return __add_cache_extent(tree, 0, start, size);
}
/*
 * Add an extent keyed by (objectid, start, size).
 * Returns 0 on success, non-zero if a matching extent already exists.
 */
int add_cache_extent2(struct cache_tree *tree,
		      u64 objectid, u64 start, u64 size)
{
	return __add_cache_extent(tree, objectid, start, size);
}
/*
 * Link a caller-allocated cache_extent into the range-keyed tree.
 * Returns the rb_insert result (non-zero on overlap with an existing entry);
 * on failure the caller keeps ownership of pe.
 */
int insert_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes);
}
/*
 * Link a caller-allocated cache_extent into the (objectid, range)-keyed tree.
 * Returns the rb_insert result (non-zero on conflict); on failure the caller
 * keeps ownership of pe.
 */
int insert_cache_extent2(struct cache_tree *tree, struct cache_extent *pe)
{
	return rb_insert(&tree->root, &pe->rb_node, cache_tree_comp_nodes2);
}
148 struct cache_extent *lookup_cache_extent(struct cache_tree *tree,
149 u64 start, u64 size)
151 struct rb_node *node;
152 struct cache_extent *entry;
153 struct cache_extent_search_range range;
155 range.start = start;
156 range.size = size;
157 node = rb_search(&tree->root, &range, cache_tree_comp_range, NULL);
158 if (!node)
159 return NULL;
161 entry = rb_entry(node, struct cache_extent, rb_node);
162 return entry;
165 struct cache_extent *lookup_cache_extent2(struct cache_tree *tree,
166 u64 objectid, u64 start, u64 size)
168 struct rb_node *node;
169 struct cache_extent *entry;
170 struct cache_extent_search_range range;
172 range.objectid = objectid;
173 range.start = start;
174 range.size = size;
175 node = rb_search(&tree->root, &range, cache_tree_comp_range2, NULL);
176 if (!node)
177 return NULL;
179 entry = rb_entry(node, struct cache_extent, rb_node);
180 return entry;
183 struct cache_extent *search_cache_extent(struct cache_tree *tree, u64 start)
185 struct rb_node *next;
186 struct rb_node *node;
187 struct cache_extent *entry;
188 struct cache_extent_search_range range;
190 range.start = start;
191 range.size = 1;
192 node = rb_search(&tree->root, &range, cache_tree_comp_range, &next);
193 if (!node)
194 node = next;
195 if (!node)
196 return NULL;
198 entry = rb_entry(node, struct cache_extent, rb_node);
199 return entry;
202 struct cache_extent *search_cache_extent2(struct cache_tree *tree,
203 u64 objectid, u64 start)
205 struct rb_node *next;
206 struct rb_node *node;
207 struct cache_extent *entry;
208 struct cache_extent_search_range range;
210 range.objectid = objectid;
211 range.start = start;
212 range.size = 1;
213 node = rb_search(&tree->root, &range, cache_tree_comp_range2, &next);
214 if (!node)
215 node = next;
216 if (!node)
217 return NULL;
219 entry = rb_entry(node, struct cache_extent, rb_node);
220 return entry;
223 struct cache_extent *first_cache_extent(struct cache_tree *tree)
225 struct rb_node *node = rb_first(&tree->root);
227 if (!node)
228 return NULL;
229 return rb_entry(node, struct cache_extent, rb_node);
232 struct cache_extent *prev_cache_extent(struct cache_extent *pe)
234 struct rb_node *node = rb_prev(&pe->rb_node);
236 if (!node)
237 return NULL;
238 return rb_entry(node, struct cache_extent, rb_node);
241 struct cache_extent *next_cache_extent(struct cache_extent *pe)
243 struct rb_node *node = rb_next(&pe->rb_node);
245 if (!node)
246 return NULL;
247 return rb_entry(node, struct cache_extent, rb_node);
/*
 * Unlink @pe from the tree.  Does not free it; ownership returns to the
 * caller.
 */
void remove_cache_extent(struct cache_tree *tree, struct cache_extent *pe)
{
	rb_erase(&pe->rb_node, &tree->root);
}
255 void cache_tree_free_extents(struct cache_tree *tree,
256 free_cache_extent free_func)
258 struct cache_extent *ce;
260 while ((ce = first_cache_extent(tree))) {
261 remove_cache_extent(tree, ce);
262 free_func(ce);
/* Default free_cache_extent callback for plain malloc'd cache_extents. */
static void free_extent_cache(struct cache_extent *pe)
{
	free(pe);
}
/* Free every extent in the tree with plain free(), leaving the tree empty. */
void free_extent_cache_tree(struct cache_tree *tree)
{
	cache_tree_free_extents(tree, free_extent_cache);
}