/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
19 #include <linux/sched.h>
22 static int tree_insert_offset(struct rb_root
*root
, u64 offset
,
25 struct rb_node
**p
= &root
->rb_node
;
26 struct rb_node
*parent
= NULL
;
27 struct btrfs_free_space
*info
;
31 info
= rb_entry(parent
, struct btrfs_free_space
, offset_index
);
33 if (offset
< info
->offset
)
35 else if (offset
> info
->offset
)
41 rb_link_node(node
, parent
, p
);
42 rb_insert_color(node
, root
);
47 static int tree_insert_bytes(struct rb_root
*root
, u64 bytes
,
50 struct rb_node
**p
= &root
->rb_node
;
51 struct rb_node
*parent
= NULL
;
52 struct btrfs_free_space
*info
;
56 info
= rb_entry(parent
, struct btrfs_free_space
, bytes_index
);
58 if (bytes
< info
->bytes
)
64 rb_link_node(node
, parent
, p
);
65 rb_insert_color(node
, root
);
71 * searches the tree for the given offset. If contains is set we will return
72 * the free space that contains the given offset. If contains is not set we
73 * will return the free space that starts at or after the given offset and is
74 * at least bytes long.
76 static struct btrfs_free_space
*tree_search_offset(struct rb_root
*root
,
77 u64 offset
, u64 bytes
,
80 struct rb_node
*n
= root
->rb_node
;
81 struct btrfs_free_space
*entry
, *ret
= NULL
;
84 entry
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
86 if (offset
< entry
->offset
) {
88 (!ret
|| entry
->offset
< ret
->offset
) &&
89 (bytes
<= entry
->bytes
))
92 } else if (offset
> entry
->offset
) {
93 if ((entry
->offset
+ entry
->bytes
- 1) >= offset
&&
94 bytes
<= entry
->bytes
) {
100 if (bytes
> entry
->bytes
) {
113 * return a chunk at least bytes size, as close to offset that we can get.
115 static struct btrfs_free_space
*tree_search_bytes(struct rb_root
*root
,
116 u64 offset
, u64 bytes
)
118 struct rb_node
*n
= root
->rb_node
;
119 struct btrfs_free_space
*entry
, *ret
= NULL
;
122 entry
= rb_entry(n
, struct btrfs_free_space
, bytes_index
);
124 if (bytes
< entry
->bytes
) {
126 * We prefer to get a hole size as close to the size we
127 * are asking for so we don't take small slivers out of
128 * huge holes, but we also want to get as close to the
129 * offset as possible so we don't have a whole lot of
132 if (offset
<= entry
->offset
) {
135 else if (entry
->bytes
< ret
->bytes
)
137 else if (entry
->offset
< ret
->offset
)
141 } else if (bytes
> entry
->bytes
) {
145 * Ok we may have multiple chunks of the wanted size,
146 * so we don't want to take the first one we find, we
147 * want to take the one closest to our given offset, so
148 * keep searching just in case theres a better match.
151 if (offset
> entry
->offset
)
153 else if (!ret
|| entry
->offset
< ret
->offset
)
161 static void unlink_free_space(struct btrfs_block_group_cache
*block_group
,
162 struct btrfs_free_space
*info
)
164 rb_erase(&info
->offset_index
, &block_group
->free_space_offset
);
165 rb_erase(&info
->bytes_index
, &block_group
->free_space_bytes
);
168 static int link_free_space(struct btrfs_block_group_cache
*block_group
,
169 struct btrfs_free_space
*info
)
174 ret
= tree_insert_offset(&block_group
->free_space_offset
, info
->offset
,
175 &info
->offset_index
);
179 ret
= tree_insert_bytes(&block_group
->free_space_bytes
, info
->bytes
,
187 static int __btrfs_add_free_space(struct btrfs_block_group_cache
*block_group
,
188 u64 offset
, u64 bytes
)
190 struct btrfs_free_space
*right_info
;
191 struct btrfs_free_space
*left_info
;
192 struct btrfs_free_space
*info
= NULL
;
193 struct btrfs_free_space
*alloc_info
;
196 alloc_info
= kzalloc(sizeof(struct btrfs_free_space
), GFP_NOFS
);
201 * first we want to see if there is free space adjacent to the range we
202 * are adding, if there is remove that struct and add a new one to
203 * cover the entire range
205 right_info
= tree_search_offset(&block_group
->free_space_offset
,
207 left_info
= tree_search_offset(&block_group
->free_space_offset
,
210 if (right_info
&& right_info
->offset
== offset
+bytes
) {
211 unlink_free_space(block_group
, right_info
);
213 info
->offset
= offset
;
214 info
->bytes
+= bytes
;
215 } else if (right_info
&& right_info
->offset
!= offset
+bytes
) {
216 printk(KERN_ERR
"btrfs adding space in the middle of an "
217 "existing free space area. existing: "
218 "offset=%llu, bytes=%llu. new: offset=%llu, "
219 "bytes=%llu\n", (unsigned long long)right_info
->offset
,
220 (unsigned long long)right_info
->bytes
,
221 (unsigned long long)offset
,
222 (unsigned long long)bytes
);
227 unlink_free_space(block_group
, left_info
);
229 if (unlikely((left_info
->offset
+ left_info
->bytes
) !=
231 printk(KERN_ERR
"btrfs free space to the left "
232 "of new free space isn't "
233 "quite right. existing: offset=%llu, "
234 "bytes=%llu. new: offset=%llu, bytes=%llu\n",
235 (unsigned long long)left_info
->offset
,
236 (unsigned long long)left_info
->bytes
,
237 (unsigned long long)offset
,
238 (unsigned long long)bytes
);
243 info
->offset
= left_info
->offset
;
244 info
->bytes
+= left_info
->bytes
;
248 info
->bytes
+= bytes
;
253 ret
= link_free_space(block_group
, info
);
261 info
->offset
= offset
;
264 ret
= link_free_space(block_group
, info
);
269 printk(KERN_ERR
"btrfs: unable to add free space :%d\n", ret
);
280 __btrfs_remove_free_space(struct btrfs_block_group_cache
*block_group
,
281 u64 offset
, u64 bytes
)
283 struct btrfs_free_space
*info
;
286 info
= tree_search_offset(&block_group
->free_space_offset
, offset
, 0,
289 if (info
&& info
->offset
== offset
) {
290 if (info
->bytes
< bytes
) {
291 printk(KERN_ERR
"Found free space at %llu, size %llu,"
292 "trying to use %llu\n",
293 (unsigned long long)info
->offset
,
294 (unsigned long long)info
->bytes
,
295 (unsigned long long)bytes
);
300 unlink_free_space(block_group
, info
);
302 if (info
->bytes
== bytes
) {
307 info
->offset
+= bytes
;
308 info
->bytes
-= bytes
;
310 ret
= link_free_space(block_group
, info
);
312 } else if (info
&& info
->offset
< offset
&&
313 info
->offset
+ info
->bytes
>= offset
+ bytes
) {
314 u64 old_start
= info
->offset
;
316 * we're freeing space in the middle of the info,
317 * this can happen during tree log replay
319 * first unlink the old info and then
320 * insert it again after the hole we're creating
322 unlink_free_space(block_group
, info
);
323 if (offset
+ bytes
< info
->offset
+ info
->bytes
) {
324 u64 old_end
= info
->offset
+ info
->bytes
;
326 info
->offset
= offset
+ bytes
;
327 info
->bytes
= old_end
- info
->offset
;
328 ret
= link_free_space(block_group
, info
);
331 /* the hole we're creating ends at the end
332 * of the info struct, just free the info
337 /* step two, insert a new info struct to cover anything
340 ret
= __btrfs_add_free_space(block_group
, old_start
,
350 int btrfs_add_free_space(struct btrfs_block_group_cache
*block_group
,
351 u64 offset
, u64 bytes
)
354 struct btrfs_free_space
*sp
;
356 mutex_lock(&block_group
->alloc_mutex
);
357 ret
= __btrfs_add_free_space(block_group
, offset
, bytes
);
358 sp
= tree_search_offset(&block_group
->free_space_offset
, offset
, 0, 1);
360 mutex_unlock(&block_group
->alloc_mutex
);
365 int btrfs_add_free_space_lock(struct btrfs_block_group_cache
*block_group
,
366 u64 offset
, u64 bytes
)
369 struct btrfs_free_space
*sp
;
371 ret
= __btrfs_add_free_space(block_group
, offset
, bytes
);
372 sp
= tree_search_offset(&block_group
->free_space_offset
, offset
, 0, 1);
378 int btrfs_remove_free_space(struct btrfs_block_group_cache
*block_group
,
379 u64 offset
, u64 bytes
)
383 mutex_lock(&block_group
->alloc_mutex
);
384 ret
= __btrfs_remove_free_space(block_group
, offset
, bytes
);
385 mutex_unlock(&block_group
->alloc_mutex
);
390 int btrfs_remove_free_space_lock(struct btrfs_block_group_cache
*block_group
,
391 u64 offset
, u64 bytes
)
395 ret
= __btrfs_remove_free_space(block_group
, offset
, bytes
);
400 void btrfs_dump_free_space(struct btrfs_block_group_cache
*block_group
,
403 struct btrfs_free_space
*info
;
407 for (n
= rb_first(&block_group
->free_space_offset
); n
; n
= rb_next(n
)) {
408 info
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
409 if (info
->bytes
>= bytes
)
412 printk(KERN_INFO
"%d blocks of free space at or bigger than bytes is"
416 u64
btrfs_block_group_free_space(struct btrfs_block_group_cache
*block_group
)
418 struct btrfs_free_space
*info
;
422 for (n
= rb_first(&block_group
->free_space_offset
); n
;
424 info
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
431 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
*block_group
)
433 struct btrfs_free_space
*info
;
434 struct rb_node
*node
;
436 mutex_lock(&block_group
->alloc_mutex
);
437 while ((node
= rb_last(&block_group
->free_space_bytes
)) != NULL
) {
438 info
= rb_entry(node
, struct btrfs_free_space
, bytes_index
);
439 unlink_free_space(block_group
, info
);
441 if (need_resched()) {
442 mutex_unlock(&block_group
->alloc_mutex
);
444 mutex_lock(&block_group
->alloc_mutex
);
447 mutex_unlock(&block_group
->alloc_mutex
);
451 static struct btrfs_free_space
*btrfs_find_free_space_offset(struct
452 btrfs_block_group_cache
453 *block_group
, u64 offset
,
456 struct btrfs_free_space
*ret
;
458 mutex_lock(&block_group
->alloc_mutex
);
459 ret
= tree_search_offset(&block_group
->free_space_offset
, offset
,
461 mutex_unlock(&block_group
->alloc_mutex
);
466 static struct btrfs_free_space
*btrfs_find_free_space_bytes(struct
467 btrfs_block_group_cache
468 *block_group
, u64 offset
,
471 struct btrfs_free_space
*ret
;
473 mutex_lock(&block_group
->alloc_mutex
);
475 ret
= tree_search_bytes(&block_group
->free_space_bytes
, offset
, bytes
);
476 mutex_unlock(&block_group
->alloc_mutex
);
482 struct btrfs_free_space
*btrfs_find_free_space(struct btrfs_block_group_cache
483 *block_group
, u64 offset
,
486 struct btrfs_free_space
*ret
= NULL
;
488 ret
= tree_search_offset(&block_group
->free_space_offset
, offset
,
491 ret
= tree_search_bytes(&block_group
->free_space_bytes
,