2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
22 #include "print-tree.h"
23 #include "transaction.h"
25 static int find_free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
26 *orig_root
, u64 num_blocks
, u64 search_start
,
27 u64 search_end
, u64 hint_block
,
28 struct btrfs_key
*ins
, u64 exclude_start
,
29 u64 exclude_nr
, int data
);
30 static int finish_current_insert(struct btrfs_trans_handle
*trans
, struct
31 btrfs_root
*extent_root
);
32 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
33 btrfs_root
*extent_root
);
35 static void reada_extent_leaves(struct btrfs_root
*root
,
36 struct btrfs_path
*path
, u64 limit
)
38 struct btrfs_node
*node
;
48 node
= btrfs_buffer_node(path
->nodes
[1]);
49 slot
= path
->slots
[1] + 1;
50 nritems
= btrfs_header_nritems(&node
->header
);
51 for (i
= slot
; i
< nritems
&& i
< slot
+ 8; i
++) {
52 item_objectid
= btrfs_disk_key_objectid(&node
->ptrs
[i
].key
);
53 if (item_objectid
> limit
)
55 blocknr
= btrfs_node_blockptr(node
, i
);
56 ret
= readahead_tree_block(root
, blocknr
);
62 static int cache_block_group(struct btrfs_root
*root
,
63 struct btrfs_block_group_cache
*block_group
)
65 struct btrfs_path
*path
;
68 struct btrfs_leaf
*leaf
;
69 struct radix_tree_root
*extent_radix
;
77 root
= root
->fs_info
->extent_root
;
78 extent_radix
= &root
->fs_info
->extent_map_radix
;
80 if (block_group
->cached
)
82 if (block_group
->data
)
84 path
= btrfs_alloc_path();
87 key
.objectid
= block_group
->key
.objectid
;
90 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
91 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
94 if (ret
&& path
->slots
[0] > 0)
96 limit
= block_group
->key
.objectid
+ block_group
->key
.offset
;
97 reada_extent_leaves(root
, path
, limit
);
99 leaf
= btrfs_buffer_leaf(path
->nodes
[0]);
100 slot
= path
->slots
[0];
101 if (slot
>= btrfs_header_nritems(&leaf
->header
)) {
102 reada_extent_leaves(root
, path
, limit
);
103 ret
= btrfs_next_leaf(root
, path
);
110 hole_size
= block_group
->key
.objectid
+
111 block_group
->key
.offset
- last
;
113 last
= block_group
->key
.objectid
;
114 hole_size
= block_group
->key
.offset
;
116 for (i
= 0; i
< hole_size
; i
++) {
117 set_radix_bit(extent_radix
,
123 btrfs_disk_key_to_cpu(&key
, &leaf
->items
[slot
].key
);
124 if (key
.objectid
>= block_group
->key
.objectid
+
125 block_group
->key
.offset
) {
127 hole_size
= block_group
->key
.objectid
+
128 block_group
->key
.offset
- last
;
130 last
= block_group
->key
.objectid
;
131 hole_size
= block_group
->key
.offset
;
133 for (i
= 0; i
< hole_size
; i
++) {
134 set_radix_bit(extent_radix
, last
+ i
);
138 if (btrfs_key_type(&key
) == BTRFS_EXTENT_ITEM_KEY
) {
140 last
= key
.objectid
+ key
.offset
;
143 hole_size
= key
.objectid
- last
;
144 for (i
= 0; i
< hole_size
; i
++) {
145 set_radix_bit(extent_radix
, last
+ i
);
147 last
= key
.objectid
+ key
.offset
;
153 block_group
->cached
= 1;
155 btrfs_free_path(path
);
159 struct btrfs_block_group_cache
*btrfs_lookup_block_group(struct
163 struct btrfs_block_group_cache
*block_group
;
166 ret
= radix_tree_gang_lookup(&info
->block_group_radix
,
167 (void **)&block_group
,
170 if (block_group
->key
.objectid
<= blocknr
&& blocknr
<=
171 block_group
->key
.objectid
+ block_group
->key
.offset
)
174 ret
= radix_tree_gang_lookup(&info
->block_group_data_radix
,
175 (void **)&block_group
,
178 if (block_group
->key
.objectid
<= blocknr
&& blocknr
<=
179 block_group
->key
.objectid
+ block_group
->key
.offset
)
185 static u64
leaf_range(struct btrfs_root
*root
)
187 u64 size
= BTRFS_LEAF_DATA_SIZE(root
);
188 do_div(size
, sizeof(struct btrfs_extent_item
) +
189 sizeof(struct btrfs_item
));
193 static u64
find_search_start(struct btrfs_root
*root
,
194 struct btrfs_block_group_cache
**cache_ret
,
195 u64 search_start
, int num
)
197 unsigned long gang
[8];
199 struct btrfs_block_group_cache
*cache
= *cache_ret
;
200 u64 last
= max(search_start
, cache
->key
.objectid
);
205 last
= max(last
, cache
->last_prealloc
);
208 ret
= cache_block_group(root
, cache
);
212 ret
= find_first_radix_bit(&root
->fs_info
->extent_map_radix
,
213 gang
, last
, ARRAY_SIZE(gang
));
216 last
= gang
[ret
-1] + 1;
218 if (ret
!= ARRAY_SIZE(gang
)) {
221 if (gang
[ret
-1] - gang
[0] > leaf_range(root
)) {
225 if (gang
[0] >= cache
->key
.objectid
+ cache
->key
.offset
) {
231 return max(cache
->last_alloc
, search_start
);
234 cache
= btrfs_lookup_block_group(root
->fs_info
,
235 last
+ cache
->key
.offset
- 1);
237 return max((*cache_ret
)->last_alloc
, search_start
);
239 cache
= btrfs_find_block_group(root
, cache
,
240 last
+ cache
->key
.offset
- 1, 0, 0);
245 static u64
div_factor(u64 num
, int factor
)
252 struct btrfs_block_group_cache
*btrfs_find_block_group(struct btrfs_root
*root
,
253 struct btrfs_block_group_cache
254 *hint
, u64 search_start
,
257 struct btrfs_block_group_cache
*cache
[8];
258 struct btrfs_block_group_cache
*found_group
= NULL
;
259 struct btrfs_fs_info
*info
= root
->fs_info
;
260 struct radix_tree_root
*radix
;
261 struct radix_tree_root
*swap_radix
;
275 radix
= &info
->block_group_data_radix
;
276 swap_radix
= &info
->block_group_radix
;
278 radix
= &info
->block_group_radix
;
279 swap_radix
= &info
->block_group_data_radix
;
283 struct btrfs_block_group_cache
*shint
;
284 shint
= btrfs_lookup_block_group(info
, search_start
);
285 if (shint
->data
== data
) {
286 used
= btrfs_block_group_used(&shint
->item
);
287 if (used
+ shint
->pinned
<
288 div_factor(shint
->key
.offset
, factor
)) {
293 if (hint
&& hint
->data
== data
) {
294 used
= btrfs_block_group_used(&hint
->item
);
295 if (used
+ hint
->pinned
<
296 div_factor(hint
->key
.offset
, factor
)) {
299 if (used
>= div_factor(hint
->key
.offset
, 8)) {
300 radix_tree_tag_clear(radix
,
302 hint
->key
.offset
- 1,
303 BTRFS_BLOCK_GROUP_AVAIL
);
305 last
= hint
->key
.offset
* 3;
306 if (hint
->key
.objectid
>= last
)
307 last
= max(search_start
+ hint
->key
.offset
- 1,
308 hint
->key
.objectid
- last
);
310 last
= hint
->key
.objectid
+ hint
->key
.offset
;
314 hint_last
= max(hint
->key
.objectid
, search_start
);
316 hint_last
= search_start
;
321 ret
= radix_tree_gang_lookup_tag(radix
, (void **)cache
,
322 last
, ARRAY_SIZE(cache
),
323 BTRFS_BLOCK_GROUP_AVAIL
);
326 for (i
= 0; i
< ret
; i
++) {
327 last
= cache
[i
]->key
.objectid
+
328 cache
[i
]->key
.offset
;
329 used
= btrfs_block_group_used(&cache
[i
]->item
);
330 if (used
+ cache
[i
]->pinned
<
331 div_factor(cache
[i
]->key
.offset
, factor
)) {
332 found_group
= cache
[i
];
335 if (used
>= div_factor(cache
[i
]->key
.offset
, 8)) {
336 radix_tree_tag_clear(radix
,
337 cache
[i
]->key
.objectid
+
338 cache
[i
]->key
.offset
- 1,
339 BTRFS_BLOCK_GROUP_AVAIL
);
347 ret
= radix_tree_gang_lookup(radix
, (void **)cache
,
348 last
, ARRAY_SIZE(cache
));
351 for (i
= 0; i
< ret
; i
++) {
352 last
= cache
[i
]->key
.objectid
+
353 cache
[i
]->key
.offset
;
354 used
= btrfs_block_group_used(&cache
[i
]->item
);
355 if (used
+ cache
[i
]->pinned
< cache
[i
]->key
.offset
) {
356 found_group
= cache
[i
];
359 if (used
>= cache
[i
]->key
.offset
) {
360 radix_tree_tag_clear(radix
,
361 cache
[i
]->key
.objectid
+
362 cache
[i
]->key
.offset
- 1,
363 BTRFS_BLOCK_GROUP_AVAIL
);
374 struct radix_tree_root
*tmp
= radix
;
382 ret
= radix_tree_gang_lookup(radix
,
383 (void **)&found_group
, 0, 1);
385 ret
= radix_tree_gang_lookup(swap_radix
,
386 (void **)&found_group
,
395 int btrfs_inc_extent_ref(struct btrfs_trans_handle
*trans
,
396 struct btrfs_root
*root
,
397 u64 blocknr
, u64 num_blocks
)
399 struct btrfs_path
*path
;
401 struct btrfs_key key
;
402 struct btrfs_leaf
*l
;
403 struct btrfs_extent_item
*item
;
404 struct btrfs_key ins
;
407 path
= btrfs_alloc_path();
410 ret
= find_free_extent(trans
, root
->fs_info
->extent_root
, 0, 0,
411 (u64
)-1, 0, &ins
, 0, 0, 0);
413 btrfs_free_path(path
);
416 key
.objectid
= blocknr
;
418 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
419 key
.offset
= num_blocks
;
420 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
428 l
= btrfs_buffer_leaf(path
->nodes
[0]);
429 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
430 refs
= btrfs_extent_refs(item
);
431 btrfs_set_extent_refs(item
, refs
+ 1);
432 btrfs_mark_buffer_dirty(path
->nodes
[0]);
434 btrfs_release_path(root
->fs_info
->extent_root
, path
);
435 btrfs_free_path(path
);
436 finish_current_insert(trans
, root
->fs_info
->extent_root
);
437 del_pending_extents(trans
, root
->fs_info
->extent_root
);
441 static int lookup_extent_ref(struct btrfs_trans_handle
*trans
,
442 struct btrfs_root
*root
, u64 blocknr
,
443 u64 num_blocks
, u32
*refs
)
445 struct btrfs_path
*path
;
447 struct btrfs_key key
;
448 struct btrfs_leaf
*l
;
449 struct btrfs_extent_item
*item
;
451 path
= btrfs_alloc_path();
452 key
.objectid
= blocknr
;
453 key
.offset
= num_blocks
;
455 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
456 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
462 l
= btrfs_buffer_leaf(path
->nodes
[0]);
463 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
464 *refs
= btrfs_extent_refs(item
);
466 btrfs_free_path(path
);
470 int btrfs_inc_root_ref(struct btrfs_trans_handle
*trans
,
471 struct btrfs_root
*root
)
473 return btrfs_inc_extent_ref(trans
, root
, bh_blocknr(root
->node
), 1);
476 int btrfs_inc_ref(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
477 struct buffer_head
*buf
)
480 struct btrfs_node
*buf_node
;
481 struct btrfs_leaf
*buf_leaf
;
482 struct btrfs_disk_key
*key
;
483 struct btrfs_file_extent_item
*fi
;
492 buf_node
= btrfs_buffer_node(buf
);
493 leaf
= btrfs_is_leaf(buf_node
);
494 buf_leaf
= btrfs_buffer_leaf(buf
);
495 for (i
= 0; i
< btrfs_header_nritems(&buf_node
->header
); i
++) {
498 key
= &buf_leaf
->items
[i
].key
;
499 if (btrfs_disk_key_type(key
) != BTRFS_EXTENT_DATA_KEY
)
501 fi
= btrfs_item_ptr(buf_leaf
, i
,
502 struct btrfs_file_extent_item
);
503 if (btrfs_file_extent_type(fi
) ==
504 BTRFS_FILE_EXTENT_INLINE
)
506 disk_blocknr
= btrfs_file_extent_disk_blocknr(fi
);
507 if (disk_blocknr
== 0)
509 ret
= btrfs_inc_extent_ref(trans
, root
, disk_blocknr
,
510 btrfs_file_extent_disk_num_blocks(fi
));
516 blocknr
= btrfs_node_blockptr(buf_node
, i
);
517 ret
= btrfs_inc_extent_ref(trans
, root
, blocknr
, 1);
527 for (i
=0; i
< faili
; i
++) {
530 key
= &buf_leaf
->items
[i
].key
;
531 if (btrfs_disk_key_type(key
) != BTRFS_EXTENT_DATA_KEY
)
533 fi
= btrfs_item_ptr(buf_leaf
, i
,
534 struct btrfs_file_extent_item
);
535 if (btrfs_file_extent_type(fi
) ==
536 BTRFS_FILE_EXTENT_INLINE
)
538 disk_blocknr
= btrfs_file_extent_disk_blocknr(fi
);
539 if (disk_blocknr
== 0)
541 err
= btrfs_free_extent(trans
, root
, disk_blocknr
,
542 btrfs_file_extent_disk_num_blocks(fi
), 0);
545 blocknr
= btrfs_node_blockptr(buf_node
, i
);
546 err
= btrfs_free_extent(trans
, root
, blocknr
, 1, 0);
553 static int write_one_cache_group(struct btrfs_trans_handle
*trans
,
554 struct btrfs_root
*root
,
555 struct btrfs_path
*path
,
556 struct btrfs_block_group_cache
*cache
)
560 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
561 struct btrfs_block_group_item
*bi
;
562 struct btrfs_key ins
;
564 ret
= find_free_extent(trans
, extent_root
, 0, 0, (u64
)-1, 0, &ins
,
566 /* FIXME, set bit to recalc cache groups on next mount */
569 ret
= btrfs_search_slot(trans
, extent_root
, &cache
->key
, path
, 0, 1);
573 bi
= btrfs_item_ptr(btrfs_buffer_leaf(path
->nodes
[0]), path
->slots
[0],
574 struct btrfs_block_group_item
);
575 memcpy(bi
, &cache
->item
, sizeof(*bi
));
576 btrfs_mark_buffer_dirty(path
->nodes
[0]);
577 btrfs_release_path(extent_root
, path
);
579 finish_current_insert(trans
, extent_root
);
580 pending_ret
= del_pending_extents(trans
, extent_root
);
586 cache
->last_alloc
= cache
->first_free
;
591 static int write_dirty_block_radix(struct btrfs_trans_handle
*trans
,
592 struct btrfs_root
*root
,
593 struct radix_tree_root
*radix
)
595 struct btrfs_block_group_cache
*cache
[8];
600 struct btrfs_path
*path
;
601 unsigned long off
= 0;
603 path
= btrfs_alloc_path();
608 ret
= radix_tree_gang_lookup_tag(radix
, (void **)cache
,
609 off
, ARRAY_SIZE(cache
),
610 BTRFS_BLOCK_GROUP_DIRTY
);
613 for (i
= 0; i
< ret
; i
++) {
614 err
= write_one_cache_group(trans
, root
,
617 * if we fail to write the cache group, we want
618 * to keep it marked dirty in hopes that a later
623 off
= cache
[i
]->key
.objectid
+
624 cache
[i
]->key
.offset
;
628 radix_tree_tag_clear(radix
, cache
[i
]->key
.objectid
+
629 cache
[i
]->key
.offset
- 1,
630 BTRFS_BLOCK_GROUP_DIRTY
);
633 btrfs_free_path(path
);
637 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle
*trans
,
638 struct btrfs_root
*root
)
642 ret
= write_dirty_block_radix(trans
, root
,
643 &root
->fs_info
->block_group_radix
);
644 ret2
= write_dirty_block_radix(trans
, root
,
645 &root
->fs_info
->block_group_data_radix
);
653 static int update_block_group(struct btrfs_trans_handle
*trans
,
654 struct btrfs_root
*root
,
655 u64 blocknr
, u64 num
, int alloc
, int mark_free
,
658 struct btrfs_block_group_cache
*cache
;
659 struct btrfs_fs_info
*info
= root
->fs_info
;
667 cache
= btrfs_lookup_block_group(info
, blocknr
);
671 block_in_group
= blocknr
- cache
->key
.objectid
;
672 WARN_ON(block_in_group
> cache
->key
.offset
);
673 radix_tree_tag_set(cache
->radix
, cache
->key
.objectid
+
674 cache
->key
.offset
- 1,
675 BTRFS_BLOCK_GROUP_DIRTY
);
677 old_val
= btrfs_block_group_used(&cache
->item
);
678 num
= min(total
, cache
->key
.offset
- block_in_group
);
680 if (blocknr
> cache
->last_alloc
)
681 cache
->last_alloc
= blocknr
;
683 for (i
= 0; i
< num
; i
++) {
684 clear_radix_bit(&info
->extent_map_radix
,
688 if (cache
->data
!= data
&&
689 old_val
< (cache
->key
.offset
>> 1)) {
691 radix_tree_delete(cache
->radix
,
692 cache
->key
.objectid
+
693 cache
->key
.offset
- 1);
697 &info
->block_group_data_radix
;
699 BTRFS_BLOCK_GROUP_DATA
;
701 cache
->radix
= &info
->block_group_radix
;
703 ~BTRFS_BLOCK_GROUP_DATA
;
705 ret
= radix_tree_insert(cache
->radix
,
706 cache
->key
.objectid
+
707 cache
->key
.offset
- 1,
713 if (blocknr
< cache
->first_free
)
714 cache
->first_free
= blocknr
;
715 if (!cache
->data
&& mark_free
) {
716 for (i
= 0; i
< num
; i
++) {
717 set_radix_bit(&info
->extent_map_radix
,
721 if (old_val
< (cache
->key
.offset
>> 1) &&
722 old_val
+ num
>= (cache
->key
.offset
>> 1)) {
723 radix_tree_tag_set(cache
->radix
,
724 cache
->key
.objectid
+
725 cache
->key
.offset
- 1,
726 BTRFS_BLOCK_GROUP_AVAIL
);
729 btrfs_set_block_group_used(&cache
->item
, old_val
);
736 static int try_remove_page(struct address_space
*mapping
, unsigned long index
)
739 ret
= invalidate_mapping_pages(mapping
, index
, index
);
743 int btrfs_copy_pinned(struct btrfs_root
*root
, struct radix_tree_root
*copy
)
745 unsigned long gang
[8];
747 struct radix_tree_root
*pinned_radix
= &root
->fs_info
->pinned_radix
;
752 ret
= find_first_radix_bit(pinned_radix
, gang
, last
,
756 for (i
= 0 ; i
< ret
; i
++) {
757 set_radix_bit(copy
, gang
[i
]);
764 int btrfs_finish_extent_commit(struct btrfs_trans_handle
*trans
,
765 struct btrfs_root
*root
,
766 struct radix_tree_root
*unpin_radix
)
768 unsigned long gang
[8];
769 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
770 struct btrfs_block_group_cache
*block_group
;
774 struct radix_tree_root
*pinned_radix
= &root
->fs_info
->pinned_radix
;
775 struct radix_tree_root
*extent_radix
= &root
->fs_info
->extent_map_radix
;
778 ret
= find_first_radix_bit(unpin_radix
, gang
, 0,
784 for (i
= 0; i
< ret
; i
++) {
785 clear_radix_bit(pinned_radix
, gang
[i
]);
786 clear_radix_bit(unpin_radix
, gang
[i
]);
787 block_group
= btrfs_lookup_block_group(root
->fs_info
,
790 WARN_ON(block_group
->pinned
== 0);
791 block_group
->pinned
--;
792 if (gang
[i
] < block_group
->last_alloc
)
793 block_group
->last_alloc
= gang
[i
];
794 if (gang
[i
] < block_group
->last_prealloc
)
795 block_group
->last_prealloc
= gang
[i
];
796 if (!block_group
->data
)
797 set_radix_bit(extent_radix
, gang
[i
]);
799 try_remove_page(btree_inode
->i_mapping
,
800 gang
[i
] << (PAGE_CACHE_SHIFT
-
801 btree_inode
->i_blkbits
));
807 static int finish_current_insert(struct btrfs_trans_handle
*trans
, struct
808 btrfs_root
*extent_root
)
810 struct btrfs_key ins
;
811 struct btrfs_extent_item extent_item
;
814 u64 super_blocks_used
;
815 struct btrfs_fs_info
*info
= extent_root
->fs_info
;
817 btrfs_set_extent_refs(&extent_item
, 1);
820 btrfs_set_key_type(&ins
, BTRFS_EXTENT_ITEM_KEY
);
821 btrfs_set_extent_owner(&extent_item
, extent_root
->root_key
.objectid
);
823 for (i
= 0; i
< extent_root
->fs_info
->extent_tree_insert_nr
; i
++) {
824 ins
.objectid
= extent_root
->fs_info
->extent_tree_insert
[i
];
825 super_blocks_used
= btrfs_super_blocks_used(&info
->super_copy
);
826 btrfs_set_super_blocks_used(&info
->super_copy
,
827 super_blocks_used
+ 1);
828 ret
= btrfs_insert_item(trans
, extent_root
, &ins
, &extent_item
,
829 sizeof(extent_item
));
832 extent_root
->fs_info
->extent_tree_insert_nr
= 0;
836 static int pin_down_block(struct btrfs_root
*root
, u64 blocknr
, int pending
)
839 struct btrfs_header
*header
;
840 struct buffer_head
*bh
;
843 bh
= btrfs_find_tree_block(root
, blocknr
);
845 if (buffer_uptodate(bh
)) {
847 root
->fs_info
->running_transaction
->transid
;
848 header
= btrfs_buffer_header(bh
);
849 if (btrfs_header_generation(header
) ==
851 btrfs_block_release(root
, bh
);
855 btrfs_block_release(root
, bh
);
857 err
= set_radix_bit(&root
->fs_info
->pinned_radix
, blocknr
);
859 struct btrfs_block_group_cache
*cache
;
860 cache
= btrfs_lookup_block_group(root
->fs_info
,
866 err
= set_radix_bit(&root
->fs_info
->pending_del_radix
, blocknr
);
873 * remove an extent from the root, returns 0 on success
875 static int __free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
876 *root
, u64 blocknr
, u64 num_blocks
, int pin
,
879 struct btrfs_path
*path
;
880 struct btrfs_key key
;
881 struct btrfs_fs_info
*info
= root
->fs_info
;
882 struct btrfs_root
*extent_root
= info
->extent_root
;
884 struct btrfs_extent_item
*ei
;
885 struct btrfs_key ins
;
888 key
.objectid
= blocknr
;
890 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
891 key
.offset
= num_blocks
;
893 path
= btrfs_alloc_path();
897 ret
= find_free_extent(trans
, root
, 0, 0, (u64
)-1, 0, &ins
, 0, 0, 0);
899 btrfs_free_path(path
);
903 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
, -1, 1);
907 ei
= btrfs_item_ptr(btrfs_buffer_leaf(path
->nodes
[0]), path
->slots
[0],
908 struct btrfs_extent_item
);
909 BUG_ON(ei
->refs
== 0);
910 refs
= btrfs_extent_refs(ei
) - 1;
911 btrfs_set_extent_refs(ei
, refs
);
912 btrfs_mark_buffer_dirty(path
->nodes
[0]);
914 u64 super_blocks_used
;
917 ret
= pin_down_block(root
, blocknr
, 0);
921 super_blocks_used
= btrfs_super_blocks_used(&info
->super_copy
);
922 btrfs_set_super_blocks_used(&info
->super_copy
,
923 super_blocks_used
- num_blocks
);
924 ret
= btrfs_del_item(trans
, extent_root
, path
);
928 ret
= update_block_group(trans
, root
, blocknr
, num_blocks
, 0,
932 btrfs_free_path(path
);
933 finish_current_insert(trans
, extent_root
);
938 * find all the blocks marked as pending in the radix tree and remove
939 * them from the extent map
941 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
942 btrfs_root
*extent_root
)
947 unsigned long gang
[4];
949 struct radix_tree_root
*pending_radix
;
950 struct radix_tree_root
*pinned_radix
;
951 struct btrfs_block_group_cache
*cache
;
953 pending_radix
= &extent_root
->fs_info
->pending_del_radix
;
954 pinned_radix
= &extent_root
->fs_info
->pinned_radix
;
957 ret
= find_first_radix_bit(pending_radix
, gang
, 0,
961 for (i
= 0; i
< ret
; i
++) {
962 wret
= set_radix_bit(pinned_radix
, gang
[i
]);
965 btrfs_lookup_block_group(extent_root
->fs_info
,
971 printk(KERN_CRIT
"set_radix_bit, err %d\n",
975 wret
= clear_radix_bit(pending_radix
, gang
[i
]);
977 wret
= __free_extent(trans
, extent_root
,
987 * remove an extent from the root, returns 0 on success
989 int btrfs_free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
990 *root
, u64 blocknr
, u64 num_blocks
, int pin
)
992 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
996 if (root
== extent_root
) {
997 pin_down_block(root
, blocknr
, 1);
1000 ret
= __free_extent(trans
, root
, blocknr
, num_blocks
, pin
, pin
== 0);
1001 pending_ret
= del_pending_extents(trans
, root
->fs_info
->extent_root
);
1002 return ret
? ret
: pending_ret
;
1006 * walks the btree of allocated extents and find a hole of a given size.
1007 * The key ins is changed to record the hole:
1008 * ins->objectid == block start
1009 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1010 * ins->offset == number of blocks
1011 * Any available blocks before search_start are skipped.
1013 static int find_free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
1014 *orig_root
, u64 num_blocks
, u64 search_start
, u64
1015 search_end
, u64 hint_block
,
1016 struct btrfs_key
*ins
, u64 exclude_start
,
1017 u64 exclude_nr
, int data
)
1019 struct btrfs_path
*path
;
1020 struct btrfs_key key
;
1026 u64 orig_search_start
= search_start
;
1028 struct btrfs_leaf
*l
;
1029 struct btrfs_root
* root
= orig_root
->fs_info
->extent_root
;
1030 struct btrfs_fs_info
*info
= root
->fs_info
;
1031 int total_needed
= num_blocks
;
1032 int total_found
= 0;
1033 int fill_prealloc
= 0;
1035 struct btrfs_block_group_cache
*block_group
;
1041 btrfs_set_key_type(ins
, BTRFS_EXTENT_ITEM_KEY
);
1043 level
= btrfs_header_level(btrfs_buffer_header(root
->node
));
1044 if (num_blocks
== 0) {
1047 total_needed
= (min(level
+ 1, BTRFS_MAX_LEVEL
) + 2) * 3;
1049 if (fill_prealloc
) {
1051 int nr
= info
->extent_tree_prealloc_nr
;
1052 first
= info
->extent_tree_prealloc
[nr
- 1];
1053 if (info
->extent_tree_prealloc_nr
>= total_needed
&&
1054 first
>= search_start
) {
1055 ins
->objectid
= info
->extent_tree_prealloc
[0];
1059 info
->extent_tree_prealloc_nr
= 0;
1061 if (search_end
== (u64
)-1)
1062 search_end
= btrfs_super_total_blocks(&info
->super_copy
);
1064 block_group
= btrfs_lookup_block_group(info
, hint_block
);
1065 block_group
= btrfs_find_block_group(root
, block_group
,
1066 hint_block
, data
, 1);
1068 block_group
= btrfs_find_block_group(root
,
1069 trans
->block_group
, 0,
1073 path
= btrfs_alloc_path();
1076 if (!block_group
->data
)
1077 search_start
= find_search_start(root
, &block_group
,
1078 search_start
, total_needed
);
1079 else if (!full_scan
)
1080 search_start
= max(block_group
->last_alloc
, search_start
);
1082 btrfs_init_path(path
);
1083 ins
->objectid
= search_start
;
1087 ret
= btrfs_search_slot(trans
, root
, ins
, path
, 0, 0);
1091 if (path
->slots
[0] > 0) {
1095 l
= btrfs_buffer_leaf(path
->nodes
[0]);
1096 btrfs_disk_key_to_cpu(&key
, &l
->items
[path
->slots
[0]].key
);
1098 * a rare case, go back one key if we hit a block group item
1099 * instead of an extent item
1101 if (btrfs_key_type(&key
) != BTRFS_EXTENT_ITEM_KEY
&&
1102 key
.objectid
+ key
.offset
>= search_start
) {
1103 ins
->objectid
= key
.objectid
;
1104 ins
->offset
= key
.offset
- 1;
1105 btrfs_release_path(root
, path
);
1106 ret
= btrfs_search_slot(trans
, root
, ins
, path
, 0, 0);
1110 if (path
->slots
[0] > 0) {
1116 l
= btrfs_buffer_leaf(path
->nodes
[0]);
1117 slot
= path
->slots
[0];
1118 if (slot
>= btrfs_header_nritems(&l
->header
)) {
1119 if (fill_prealloc
) {
1120 info
->extent_tree_prealloc_nr
= 0;
1124 limit
= last_block
+
1125 (block_group
->key
.offset
>> 1);
1127 limit
= search_start
+
1128 (block_group
->key
.offset
>> 1);
1129 ret
= btrfs_next_leaf(root
, path
);
1135 ins
->objectid
= search_start
;
1136 ins
->offset
= search_end
- search_start
;
1140 ins
->objectid
= last_block
> search_start
?
1141 last_block
: search_start
;
1142 ins
->offset
= search_end
- ins
->objectid
;
1146 btrfs_disk_key_to_cpu(&key
, &l
->items
[slot
].key
);
1147 if (key
.objectid
>= search_start
&& key
.objectid
> last_block
&&
1149 if (last_block
< search_start
)
1150 last_block
= search_start
;
1151 hole_size
= key
.objectid
- last_block
;
1152 if (hole_size
>= num_blocks
) {
1153 ins
->objectid
= last_block
;
1154 ins
->offset
= hole_size
;
1159 if (btrfs_key_type(&key
) != BTRFS_EXTENT_ITEM_KEY
)
1163 last_block
= key
.objectid
+ key
.offset
;
1164 if (!full_scan
&& last_block
>= block_group
->key
.objectid
+
1165 block_group
->key
.offset
) {
1166 btrfs_release_path(root
, path
);
1167 search_start
= block_group
->key
.objectid
+
1168 block_group
->key
.offset
* 2;
1176 /* we have to make sure we didn't find an extent that has already
1177 * been allocated by the map tree or the original allocation
1179 btrfs_release_path(root
, path
);
1180 BUG_ON(ins
->objectid
< search_start
);
1182 if (ins
->objectid
+ num_blocks
>= search_end
) {
1187 search_start
= orig_search_start
;
1194 for (test_block
= ins
->objectid
;
1195 test_block
< ins
->objectid
+ num_blocks
; test_block
++) {
1196 if (test_radix_bit(&info
->pinned_radix
, test_block
)) {
1197 search_start
= test_block
+ 1;
1201 if (!fill_prealloc
&& info
->extent_tree_insert_nr
) {
1203 info
->extent_tree_insert
[info
->extent_tree_insert_nr
- 1];
1204 if (ins
->objectid
+ num_blocks
>
1205 info
->extent_tree_insert
[0] &&
1206 ins
->objectid
<= last
) {
1207 search_start
= last
+ 1;
1208 WARN_ON(!full_scan
);
1212 if (!fill_prealloc
&& info
->extent_tree_prealloc_nr
) {
1214 info
->extent_tree_prealloc
[info
->extent_tree_prealloc_nr
- 1];
1215 if (ins
->objectid
+ num_blocks
> first
&&
1216 ins
->objectid
<= info
->extent_tree_prealloc
[0]) {
1217 search_start
= info
->extent_tree_prealloc
[0] + 1;
1221 if (exclude_nr
> 0 && (ins
->objectid
+ num_blocks
> exclude_start
&&
1222 ins
->objectid
< exclude_start
+ exclude_nr
)) {
1223 search_start
= exclude_start
+ exclude_nr
;
1226 if (fill_prealloc
) {
1228 test_block
= ins
->objectid
;
1229 if (test_block
- info
->extent_tree_prealloc
[total_needed
- 1] >=
1232 info
->extent_tree_prealloc_nr
= total_found
;
1234 while(test_block
< ins
->objectid
+ ins
->offset
&&
1235 total_found
< total_needed
) {
1236 nr
= total_needed
- total_found
- 1;
1238 info
->extent_tree_prealloc
[nr
] = test_block
;
1242 if (total_found
< total_needed
) {
1243 search_start
= test_block
;
1246 info
->extent_tree_prealloc_nr
= total_found
;
1249 block_group
= btrfs_lookup_block_group(info
, ins
->objectid
);
1252 block_group
->last_prealloc
=
1253 info
->extent_tree_prealloc
[total_needed
-1];
1255 trans
->block_group
= block_group
;
1258 ins
->offset
= num_blocks
;
1259 btrfs_free_path(path
);
1263 if (search_start
+ num_blocks
>= search_end
) {
1264 search_start
= orig_search_start
;
1274 block_group
= btrfs_lookup_block_group(info
, search_start
);
1277 block_group
= btrfs_find_block_group(root
, block_group
,
1278 search_start
, data
, 0);
1282 btrfs_release_path(root
, path
);
1283 btrfs_free_path(path
);
1287 * finds a free extent and does all the dirty work required for allocation
1288 * returns the key for the extent through ins, and a tree buffer for
1289 * the first block of the extent through buf.
1291 * returns 0 if everything worked, non-zero otherwise.
1293 int btrfs_alloc_extent(struct btrfs_trans_handle
*trans
,
1294 struct btrfs_root
*root
, u64 owner
,
1295 u64 num_blocks
, u64 hint_block
,
1296 u64 search_end
, struct btrfs_key
*ins
, int data
)
1300 u64 super_blocks_used
;
1301 u64 search_start
= 0;
1302 u64 exclude_start
= 0;
1304 struct btrfs_fs_info
*info
= root
->fs_info
;
1305 struct btrfs_root
*extent_root
= info
->extent_root
;
1306 struct btrfs_extent_item extent_item
;
1307 struct btrfs_key prealloc_key
;
1309 btrfs_set_extent_refs(&extent_item
, 1);
1310 btrfs_set_extent_owner(&extent_item
, owner
);
1312 if (root
== extent_root
) {
1314 BUG_ON(info
->extent_tree_prealloc_nr
== 0);
1315 BUG_ON(num_blocks
!= 1);
1317 info
->extent_tree_prealloc_nr
--;
1318 nr
= info
->extent_tree_prealloc_nr
;
1319 ins
->objectid
= info
->extent_tree_prealloc
[nr
];
1320 info
->extent_tree_insert
[info
->extent_tree_insert_nr
++] =
1322 ret
= update_block_group(trans
, root
,
1323 ins
->objectid
, ins
->offset
, 1, 0, 0);
1329 * if we're doing a data allocation, preallocate room in the
1330 * extent tree first. This way the extent tree blocks end up
1331 * in the correct block group.
1334 ret
= find_free_extent(trans
, root
, 0, 0,
1335 search_end
, 0, &prealloc_key
, 0, 0, 0);
1339 exclude_nr
= info
->extent_tree_prealloc_nr
;
1340 exclude_start
= info
->extent_tree_prealloc
[exclude_nr
- 1];
1343 /* do the real allocation */
1344 ret
= find_free_extent(trans
, root
, num_blocks
, search_start
,
1345 search_end
, hint_block
, ins
,
1346 exclude_start
, exclude_nr
, data
);
1352 * if we're doing a metadata allocation, preallocate space in the
1353 * extent tree second. This way, we don't create a tiny hole
1354 * in the allocation map between any unused preallocation blocks
1355 * and the metadata block we're actually allocating. On disk,
1357 * [block we've allocated], [used prealloc 1], [ unused prealloc ]
1358 * The unused prealloc will get reused the next time around.
1361 exclude_start
= ins
->objectid
;
1362 exclude_nr
= ins
->offset
;
1363 hint_block
= exclude_start
+ exclude_nr
;
1364 ret
= find_free_extent(trans
, root
, 0, search_start
,
1365 search_end
, hint_block
,
1366 &prealloc_key
, exclude_start
,
1373 super_blocks_used
= btrfs_super_blocks_used(&info
->super_copy
);
1374 btrfs_set_super_blocks_used(&info
->super_copy
, super_blocks_used
+
1376 ret
= btrfs_insert_item(trans
, extent_root
, ins
, &extent_item
,
1377 sizeof(extent_item
));
1380 finish_current_insert(trans
, extent_root
);
1381 pending_ret
= del_pending_extents(trans
, extent_root
);
1388 ret
= update_block_group(trans
, root
, ins
->objectid
, ins
->offset
, 1, 0,
1395 * helper function to allocate a block for a given tree
1396 * returns the tree buffer or NULL.
1398 struct buffer_head
*btrfs_alloc_free_block(struct btrfs_trans_handle
*trans
,
1399 struct btrfs_root
*root
, u64 hint
)
1401 struct btrfs_key ins
;
1403 struct buffer_head
*buf
;
1405 ret
= btrfs_alloc_extent(trans
, root
, root
->root_key
.objectid
,
1406 1, hint
, (unsigned long)-1, &ins
, 0);
1409 return ERR_PTR(ret
);
1411 buf
= btrfs_find_create_tree_block(root
, ins
.objectid
);
1413 btrfs_free_extent(trans
, root
, ins
.objectid
, 1, 0);
1414 return ERR_PTR(-ENOMEM
);
1416 set_buffer_uptodate(buf
);
1417 set_buffer_checked(buf
);
1418 set_radix_bit(&trans
->transaction
->dirty_pages
, buf
->b_page
->index
);
1422 static int drop_leaf_ref(struct btrfs_trans_handle
*trans
,
1423 struct btrfs_root
*root
, struct buffer_head
*cur
)
1425 struct btrfs_disk_key
*key
;
1426 struct btrfs_leaf
*leaf
;
1427 struct btrfs_file_extent_item
*fi
;
1432 BUG_ON(!btrfs_is_leaf(btrfs_buffer_node(cur
)));
1433 leaf
= btrfs_buffer_leaf(cur
);
1434 nritems
= btrfs_header_nritems(&leaf
->header
);
1435 for (i
= 0; i
< nritems
; i
++) {
1437 key
= &leaf
->items
[i
].key
;
1438 if (btrfs_disk_key_type(key
) != BTRFS_EXTENT_DATA_KEY
)
1440 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
1441 if (btrfs_file_extent_type(fi
) == BTRFS_FILE_EXTENT_INLINE
)
1444 * FIXME make sure to insert a trans record that
1445 * repeats the snapshot del on crash
1447 disk_blocknr
= btrfs_file_extent_disk_blocknr(fi
);
1448 if (disk_blocknr
== 0)
1450 ret
= btrfs_free_extent(trans
, root
, disk_blocknr
,
1451 btrfs_file_extent_disk_num_blocks(fi
),
1458 static void reada_walk_down(struct btrfs_root
*root
,
1459 struct btrfs_node
*node
)
1467 nritems
= btrfs_header_nritems(&node
->header
);
1468 for (i
= 0; i
< nritems
; i
++) {
1469 blocknr
= btrfs_node_blockptr(node
, i
);
1470 ret
= lookup_extent_ref(NULL
, root
, blocknr
, 1, &refs
);
1474 ret
= readahead_tree_block(root
, blocknr
);
1481 * helper function for drop_snapshot, this walks down the tree dropping ref
1482 * counts as it goes.
1484 static int walk_down_tree(struct btrfs_trans_handle
*trans
, struct btrfs_root
1485 *root
, struct btrfs_path
*path
, int *level
)
1487 struct buffer_head
*next
;
1488 struct buffer_head
*cur
;
1493 WARN_ON(*level
< 0);
1494 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
1495 ret
= lookup_extent_ref(trans
, root
, bh_blocknr(path
->nodes
[*level
]),
1502 * walk down to the last node level and free all the leaves
1504 while(*level
>= 0) {
1505 WARN_ON(*level
< 0);
1506 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
1507 cur
= path
->nodes
[*level
];
1509 if (*level
> 0 && path
->slots
[*level
] == 0)
1510 reada_walk_down(root
, btrfs_buffer_node(cur
));
1512 if (btrfs_header_level(btrfs_buffer_header(cur
)) != *level
)
1515 if (path
->slots
[*level
] >=
1516 btrfs_header_nritems(btrfs_buffer_header(cur
)))
1519 ret
= drop_leaf_ref(trans
, root
, cur
);
1523 blocknr
= btrfs_node_blockptr(btrfs_buffer_node(cur
),
1524 path
->slots
[*level
]);
1525 ret
= lookup_extent_ref(trans
, root
, blocknr
, 1, &refs
);
1528 path
->slots
[*level
]++;
1529 ret
= btrfs_free_extent(trans
, root
, blocknr
, 1, 1);
1533 next
= read_tree_block(root
, blocknr
);
1534 WARN_ON(*level
<= 0);
1535 if (path
->nodes
[*level
-1])
1536 btrfs_block_release(root
, path
->nodes
[*level
-1]);
1537 path
->nodes
[*level
-1] = next
;
1538 *level
= btrfs_header_level(btrfs_buffer_header(next
));
1539 path
->slots
[*level
] = 0;
1542 WARN_ON(*level
< 0);
1543 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
1544 ret
= btrfs_free_extent(trans
, root
,
1545 bh_blocknr(path
->nodes
[*level
]), 1, 1);
1546 btrfs_block_release(root
, path
->nodes
[*level
]);
1547 path
->nodes
[*level
] = NULL
;
1554 * helper for dropping snapshots. This walks back up the tree in the path
1555 * to find the first node higher up where we haven't yet gone through
1558 static int walk_up_tree(struct btrfs_trans_handle
*trans
, struct btrfs_root
1559 *root
, struct btrfs_path
*path
, int *level
)
1564 for(i
= *level
; i
< BTRFS_MAX_LEVEL
- 1 && path
->nodes
[i
]; i
++) {
1565 slot
= path
->slots
[i
];
1566 if (slot
< btrfs_header_nritems(
1567 btrfs_buffer_header(path
->nodes
[i
])) - 1) {
1572 ret
= btrfs_free_extent(trans
, root
,
1573 bh_blocknr(path
->nodes
[*level
]),
1576 btrfs_block_release(root
, path
->nodes
[*level
]);
1577 path
->nodes
[*level
] = NULL
;
1585 * drop the reference count on the tree rooted at 'snap'. This traverses
1586 * the tree freeing any blocks that have a ref count of zero after being
1589 int btrfs_drop_snapshot(struct btrfs_trans_handle
*trans
, struct btrfs_root
1590 *root
, struct buffer_head
*snap
)
1595 struct btrfs_path
*path
;
1599 path
= btrfs_alloc_path();
1602 level
= btrfs_header_level(btrfs_buffer_header(snap
));
1604 path
->nodes
[level
] = snap
;
1605 path
->slots
[level
] = 0;
1607 wret
= walk_down_tree(trans
, root
, path
, &level
);
1613 wret
= walk_up_tree(trans
, root
, path
, &level
);
1619 for (i
= 0; i
<= orig_level
; i
++) {
1620 if (path
->nodes
[i
]) {
1621 btrfs_block_release(root
, path
->nodes
[i
]);
1624 btrfs_free_path(path
);
1628 static int free_block_group_radix(struct radix_tree_root
*radix
)
1631 struct btrfs_block_group_cache
*cache
[8];
1635 ret
= radix_tree_gang_lookup(radix
, (void **)cache
, 0,
1639 for (i
= 0; i
< ret
; i
++) {
1640 radix_tree_delete(radix
, cache
[i
]->key
.objectid
+
1641 cache
[i
]->key
.offset
- 1);
1648 int btrfs_free_block_groups(struct btrfs_fs_info
*info
)
1652 unsigned long gang
[16];
1655 ret
= free_block_group_radix(&info
->block_group_radix
);
1656 ret2
= free_block_group_radix(&info
->block_group_data_radix
);
1663 ret
= find_first_radix_bit(&info
->extent_map_radix
,
1664 gang
, 0, ARRAY_SIZE(gang
));
1667 for (i
= 0; i
< ret
; i
++) {
1668 clear_radix_bit(&info
->extent_map_radix
, gang
[i
]);
1674 int btrfs_read_block_groups(struct btrfs_root
*root
)
1676 struct btrfs_path
*path
;
1679 struct btrfs_block_group_item
*bi
;
1680 struct btrfs_block_group_cache
*cache
;
1681 struct btrfs_fs_info
*info
= root
->fs_info
;
1682 struct radix_tree_root
*radix
;
1683 struct btrfs_key key
;
1684 struct btrfs_key found_key
;
1685 struct btrfs_leaf
*leaf
;
1686 u64 group_size_blocks
;
1689 group_size_blocks
= BTRFS_BLOCK_GROUP_SIZE
>>
1690 root
->fs_info
->sb
->s_blocksize_bits
;
1691 root
= info
->extent_root
;
1693 key
.offset
= group_size_blocks
;
1695 btrfs_set_key_type(&key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
1697 path
= btrfs_alloc_path();
1702 ret
= btrfs_search_slot(NULL
, info
->extent_root
,
1708 leaf
= btrfs_buffer_leaf(path
->nodes
[0]);
1709 btrfs_disk_key_to_cpu(&found_key
,
1710 &leaf
->items
[path
->slots
[0]].key
);
1711 cache
= kmalloc(sizeof(*cache
), GFP_NOFS
);
1717 bi
= btrfs_item_ptr(leaf
, path
->slots
[0],
1718 struct btrfs_block_group_item
);
1719 if (bi
->flags
& BTRFS_BLOCK_GROUP_DATA
) {
1720 radix
= &info
->block_group_data_radix
;
1723 radix
= &info
->block_group_radix
;
1727 memcpy(&cache
->item
, bi
, sizeof(*bi
));
1728 memcpy(&cache
->key
, &found_key
, sizeof(found_key
));
1729 cache
->last_alloc
= cache
->key
.objectid
;
1730 cache
->first_free
= cache
->key
.objectid
;
1731 cache
->last_prealloc
= cache
->key
.objectid
;
1735 cache
->radix
= radix
;
1737 key
.objectid
= found_key
.objectid
+ found_key
.offset
;
1738 btrfs_release_path(root
, path
);
1739 ret
= radix_tree_insert(radix
, found_key
.objectid
+
1740 found_key
.offset
- 1,
1743 used
= btrfs_block_group_used(bi
);
1744 if (used
< div_factor(key
.offset
, 8)) {
1745 radix_tree_tag_set(radix
, found_key
.objectid
+
1746 found_key
.offset
- 1,
1747 BTRFS_BLOCK_GROUP_AVAIL
);
1750 btrfs_super_total_blocks(&info
->super_copy
))
1754 btrfs_free_path(path
);