Btrfs: kill the block group alloc mutex
[linux-2.6.git] / fs/btrfs/free-space-cache.c (blob df19b60eef619678ac233dad45ce773cc8c522c0)
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
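
/*
 * Free space in each block group is cached in two red-black trees: one
 * indexed by offset (free_space_offset) and one indexed by size
 * (free_space_bytes).  Every lookup and update below runs under the
 * block_group->tree_lock spinlock, which, per the commit subject, replaces
 * the old per-block-group alloc mutex.
 */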

static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset)
                        p = &(*p)->rb_left;
                else if (offset > info->offset)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

static int tree_insert_bytes(struct rb_root *root, u64 bytes,
                             struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, bytes_index);

                if (bytes < info->bytes)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}
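
/*
 * Unlike the offset tree, the bytes tree tolerates duplicates: several free
 * space entries can have the same size, so equal keys simply go to the right
 * and tree_insert_bytes() never returns -EEXIST, whereas tree_insert_offset()
 * rejects a duplicate offset.
 */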

/*
 * searches the tree for the given offset.
 *
 * fuzzy == 1: this is used for allocations where we are given a hint of where
 * to look for free space.  Because the hint may not be completely on an offset
 * mark, or the hint may no longer point to free space we need to fudge our
 * results a bit.  So we look for free space starting at or after offset with at
 * least bytes size.  We prefer to find as close to the given offset as we can.
 * Also if the offset is within a free space range, then we will return the free
 * space that contains the given offset, which means we can return a free space
 * chunk with an offset before the provided offset.
 *
 * fuzzy == 0: this is just a normal tree search.  Give us the free space that
 * starts at the given offset which is at least bytes size, and if it's not
 * there return NULL.
 */
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
                                                   u64 offset, u64 bytes,
                                                   int fuzzy)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);

                if (offset < entry->offset) {
                        if (fuzzy &&
                            (!ret || entry->offset < ret->offset) &&
                            (bytes <= entry->bytes))
                                ret = entry;
                        n = n->rb_left;
                } else if (offset > entry->offset) {
                        if (fuzzy &&
                            (entry->offset + entry->bytes - 1) >= offset &&
                            bytes <= entry->bytes) {
                                ret = entry;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        if (bytes > entry->bytes) {
                                n = n->rb_right;
                                continue;
                        }
                        ret = entry;
                        break;
                }
        }

        return ret;
}
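
/*
 * For example, if an entry covering [90, 140) is cached, a fuzzy lookup for
 * offset 100 with bytes 10 returns that entry: the hint falls inside it, so
 * the entry containing the hint is handed back even though its offset is
 * smaller than the one asked for.
 */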

/*
 * return a chunk at least bytes in size, as close to offset as we can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
                                                  u64 offset, u64 bytes)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, bytes_index);

                if (bytes < entry->bytes) {
                        /*
                         * We prefer to get a hole size as close to the size we
                         * are asking for so we don't take small slivers out of
                         * huge holes, but we also want to get as close to the
                         * offset as possible so we don't have a whole lot of
                         * fragmentation.
                         */
                        if (offset <= entry->offset) {
                                if (!ret)
                                        ret = entry;
                                else if (entry->bytes < ret->bytes)
                                        ret = entry;
                                else if (entry->offset < ret->offset)
                                        ret = entry;
                        }
                        n = n->rb_left;
                } else if (bytes > entry->bytes) {
                        n = n->rb_right;
                } else {
                        /*
                         * Ok we may have multiple chunks of the wanted size,
                         * so we don't want to take the first one we find, we
                         * want to take the one closest to our given offset, so
                         * keep searching just in case there's a better match.
                         */
                        n = n->rb_right;
                        if (offset > entry->offset)
                                continue;
                        else if (!ret || entry->offset < ret->offset)
                                ret = entry;
                }
        }

        return ret;
}
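
/*
 * In short, tree_search_bytes() only considers entries that are big enough
 * and start at or after the requested offset; it swaps in a new candidate
 * whenever it finds either a tighter fit or one at a lower offset, which is
 * the fit-versus-locality trade-off the comments above describe.
 */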

static void unlink_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &block_group->free_space_offset);
        rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bytes);
        ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
                                 &info->offset_index);
        if (ret)
                return ret;

        ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
                                &info->bytes_index);
        if (ret)
                return ret;

        return ret;
}
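
/*
 * Every entry lives in both trees, so unlink_free_space() and
 * link_free_space() are used in pairs whenever an entry's offset or size
 * changes: an rbtree node cannot be re-keyed in place, it has to be erased
 * and re-inserted for both indexes to stay consistent.
 */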

int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 offset, u64 bytes)
{
        struct btrfs_free_space *right_info;
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *info = NULL;
        int ret = 0;

        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
        if (!info)
                return -ENOMEM;

        info->offset = offset;
        info->bytes = bytes;

        spin_lock(&block_group->tree_lock);

        /*
         * First we want to see if there is free space adjacent to the range we
         * are adding; if there is, remove that struct and add a new one to
         * cover the entire range.
         */
        right_info = tree_search_offset(&block_group->free_space_offset,
                                        offset+bytes, 0, 0);
        left_info = tree_search_offset(&block_group->free_space_offset,
                                       offset-1, 0, 1);

        if (right_info) {
                unlink_free_space(block_group, right_info);
                info->bytes += right_info->bytes;
                kfree(right_info);
        }

        if (left_info && left_info->offset + left_info->bytes == offset) {
                unlink_free_space(block_group, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kfree(left_info);
        }

        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);

        spin_unlock(&block_group->tree_lock);

        if (ret) {
                printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
                if (ret == -EEXIST)
                        BUG();
        }

        return ret;
}
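
/*
 * For example, if [4096, 8192) and [12288, 16384) are already cached as free
 * and btrfs_add_free_space() is called with offset 8192 and bytes 4096, both
 * neighbours are merged into the new entry and the tree ends up with a
 * single entry covering [4096, 16384).
 */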

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        spin_lock(&block_group->tree_lock);

        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);
        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
                        printk(KERN_ERR "Found free space at %llu, size %llu, "
                               "trying to use %llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }
                unlink_free_space(block_group, info);

                if (info->bytes == bytes) {
                        kfree(info);
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }

                info->offset += bytes;
                info->bytes -= bytes;

                ret = link_free_space(block_group, info);
                spin_unlock(&block_group->tree_lock);
                BUG_ON(ret);
        } else if (info && info->offset < offset &&
                   info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
                 * this can happen during tree log replay
                 *
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
                unlink_free_space(block_group, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;

                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
                        BUG_ON(ret);
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
                         */
                        kfree(info);
                }
                spin_unlock(&block_group->tree_lock);
                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
                ret = btrfs_add_free_space(block_group, old_start,
                                           offset - old_start);
                BUG_ON(ret);
        } else {
                spin_unlock(&block_group->tree_lock);
                if (!info) {
                        printk(KERN_ERR "couldn't find space %llu to free\n",
                               (unsigned long long)offset);
                        printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
                               block_group->cached, block_group->key.objectid,
                               block_group->key.offset);
                        btrfs_dump_free_space(block_group, bytes);
                } else if (info) {
                        printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
                               "but wanted offset=%llu bytes=%llu\n",
                               info->offset, info->bytes, offset, bytes);
                }
                WARN_ON(1);
        }
out:
        return ret;
}
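
/*
 * For example, removing [4096, 8192) from a cached entry covering [0, 16384)
 * takes the middle branch above: the entry is shrunk to [8192, 16384) and
 * re-linked, and the piece before the hole, [0, 4096), is re-added as a
 * separate entry via btrfs_add_free_space().
 */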

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
                printk(KERN_ERR "entry offset %llu, bytes %llu\n", info->offset,
                       info->bytes);
        }
        printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
               "\n", count);
}

u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 ret = 0;

        for (n = rb_first(&block_group->free_space_offset); n;
             n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                ret += info->bytes;
        }

        return ret;
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *node;

        spin_lock(&block_group->tree_lock);
        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                if (need_resched()) {
                        spin_unlock(&block_group->tree_lock);
                        cond_resched();
                        spin_lock(&block_group->tree_lock);
                }
        }
        spin_unlock(&block_group->tree_lock);
}
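
/*
 * Tearing down a large cache can take a while, so the loop above drops the
 * spinlock and calls cond_resched() whenever need_resched() fires, then
 * retakes the lock before freeing more entries.
 */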

u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        u64 ret = 0;

        spin_lock(&block_group->tree_lock);
        entry = tree_search_offset(&block_group->free_space_offset, offset,
                                   bytes + empty_size, 1);
        if (!entry)
                entry = tree_search_bytes(&block_group->free_space_bytes,
                                          offset, bytes + empty_size);
        if (entry) {
                unlink_free_space(block_group, entry);
                ret = entry->offset;
                entry->offset += bytes;
                entry->bytes -= bytes;

                if (!entry->bytes)
                        kfree(entry);
                else
                        link_free_space(block_group, entry);
        }
        spin_unlock(&block_group->tree_lock);

        return ret;
}
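
/*
 * Illustrative sketch, not part of the original file: a caller such as the
 * extent allocator might carve space out of a block group roughly like the
 * snippet below, where search_start, num_bytes and empty_size are
 * hypothetical variables belonging to that caller:
 *
 *      u64 start;
 *
 *      start = btrfs_find_space_for_alloc(block_group, search_start,
 *                                         num_bytes, empty_size);
 *      if (!start)
 *              ... move on to another block group ...
 *
 * ret above starts out as 0 and is only set once an entry has been unlinked
 * and trimmed, so callers can presumably treat a return value of 0 as
 * "nothing suitable found" in this block group.
 */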