2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
21 #include "kerncompat.h"
22 #include "radix-tree.h"
25 #include "print-tree.h"
26 #include "transaction.h"
30 #define BLOCK_GROUP_DATA EXTENT_WRITEBACK
31 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
32 #define BLOCK_GROUP_SYSTEM EXTENT_NEW
34 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
36 #define PENDING_EXTENT_INSERT 0
37 #define PENDING_EXTENT_DELETE 1
38 #define PENDING_BACKREF_UPDATE 2
40 struct pending_extent_op
{
51 static int finish_current_insert(struct btrfs_trans_handle
*trans
, struct
52 btrfs_root
*extent_root
);
53 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
54 btrfs_root
*extent_root
);
56 void maybe_lock_mutex(struct btrfs_root
*root
)
60 void maybe_unlock_mutex(struct btrfs_root
*root
)
64 static int cache_block_group(struct btrfs_root
*root
,
65 struct btrfs_block_group_cache
*block_group
)
67 struct btrfs_path
*path
;
70 struct extent_buffer
*leaf
;
71 struct extent_io_tree
*free_space_cache
;
81 root
= root
->fs_info
->extent_root
;
82 free_space_cache
= &root
->fs_info
->free_space_cache
;
84 if (block_group
->cached
)
87 path
= btrfs_alloc_path();
92 first_free
= block_group
->key
.objectid
;
93 key
.objectid
= block_group
->key
.objectid
;
95 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
96 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
99 ret
= btrfs_previous_item(root
, path
, 0, BTRFS_EXTENT_ITEM_KEY
);
103 leaf
= path
->nodes
[0];
104 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
105 if (key
.objectid
+ key
.offset
> first_free
)
106 first_free
= key
.objectid
+ key
.offset
;
109 leaf
= path
->nodes
[0];
110 slot
= path
->slots
[0];
111 if (slot
>= btrfs_header_nritems(leaf
)) {
112 ret
= btrfs_next_leaf(root
, path
);
121 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
122 if (key
.objectid
< block_group
->key
.objectid
) {
125 if (key
.objectid
>= block_group
->key
.objectid
+
126 block_group
->key
.offset
) {
130 if (btrfs_key_type(&key
) == BTRFS_EXTENT_ITEM_KEY
) {
135 if (key
.objectid
> last
) {
136 hole_size
= key
.objectid
- last
;
137 set_extent_dirty(free_space_cache
, last
,
138 last
+ hole_size
- 1,
141 last
= key
.objectid
+ key
.offset
;
149 if (block_group
->key
.objectid
+
150 block_group
->key
.offset
> last
) {
151 hole_size
= block_group
->key
.objectid
+
152 block_group
->key
.offset
- last
;
153 set_extent_dirty(free_space_cache
, last
,
154 last
+ hole_size
- 1, GFP_NOFS
);
156 block_group
->cached
= 1;
158 btrfs_free_path(path
);
162 struct btrfs_block_group_cache
*btrfs_lookup_first_block_group(struct
166 struct extent_io_tree
*block_group_cache
;
167 struct btrfs_block_group_cache
*block_group
= NULL
;
173 bytenr
= max_t(u64
, bytenr
,
174 BTRFS_SUPER_INFO_OFFSET
+ BTRFS_SUPER_INFO_SIZE
);
175 block_group_cache
= &info
->block_group_cache
;
176 ret
= find_first_extent_bit(block_group_cache
,
177 bytenr
, &start
, &end
,
178 BLOCK_GROUP_DATA
| BLOCK_GROUP_METADATA
|
183 ret
= get_state_private(block_group_cache
, start
, &ptr
);
187 block_group
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
191 struct btrfs_block_group_cache
*btrfs_lookup_block_group(struct
195 struct extent_io_tree
*block_group_cache
;
196 struct btrfs_block_group_cache
*block_group
= NULL
;
202 block_group_cache
= &info
->block_group_cache
;
203 ret
= find_first_extent_bit(block_group_cache
,
204 bytenr
, &start
, &end
,
205 BLOCK_GROUP_DATA
| BLOCK_GROUP_METADATA
|
210 ret
= get_state_private(block_group_cache
, start
, &ptr
);
214 block_group
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
215 if (block_group
->key
.objectid
<= bytenr
&& bytenr
<
216 block_group
->key
.objectid
+ block_group
->key
.offset
)
221 static int block_group_bits(struct btrfs_block_group_cache
*cache
, u64 bits
)
223 return (cache
->flags
& bits
) == bits
;
226 static int noinline
find_search_start(struct btrfs_root
*root
,
227 struct btrfs_block_group_cache
**cache_ret
,
228 u64
*start_ret
, int num
, int data
)
231 struct btrfs_block_group_cache
*cache
= *cache_ret
;
235 u64 search_start
= *start_ret
;
242 ret
= cache_block_group(root
, cache
);
246 last
= max(search_start
, cache
->key
.objectid
);
247 if (cache
->ro
|| !block_group_bits(cache
, data
)) {
252 ret
= find_first_extent_bit(&root
->fs_info
->free_space_cache
,
253 last
, &start
, &end
, EXTENT_DIRTY
);
258 start
= max(last
, start
);
260 if (last
- start
< num
) {
263 if (start
+ num
> cache
->key
.objectid
+ cache
->key
.offset
) {
270 cache
= btrfs_lookup_block_group(root
->fs_info
, search_start
);
272 printk("Unable to find block group for %llu\n",
273 (unsigned long long)search_start
);
279 last
= cache
->key
.objectid
+ cache
->key
.offset
;
281 cache
= btrfs_lookup_first_block_group(root
->fs_info
, last
);
291 cache
= btrfs_find_block_group(root
, cache
, last
, data
, 0);
292 cache
= btrfs_find_block_group(root
, cache
, last
, data
, 0);
300 static u64
div_factor(u64 num
, int factor
)
309 static int block_group_state_bits(u64 flags
)
312 if (flags
& BTRFS_BLOCK_GROUP_DATA
)
313 bits
|= BLOCK_GROUP_DATA
;
314 if (flags
& BTRFS_BLOCK_GROUP_METADATA
)
315 bits
|= BLOCK_GROUP_METADATA
;
316 if (flags
& BTRFS_BLOCK_GROUP_SYSTEM
)
317 bits
|= BLOCK_GROUP_SYSTEM
;
321 struct btrfs_block_group_cache
*btrfs_find_block_group(struct btrfs_root
*root
,
322 struct btrfs_block_group_cache
323 *hint
, u64 search_start
,
326 struct btrfs_block_group_cache
*cache
;
327 struct extent_io_tree
*block_group_cache
;
328 struct btrfs_block_group_cache
*found_group
= NULL
;
329 struct btrfs_fs_info
*info
= root
->fs_info
;
342 block_group_cache
= &info
->block_group_cache
;
347 bit
= block_group_state_bits(data
);
350 struct btrfs_block_group_cache
*shint
;
351 shint
= btrfs_lookup_block_group(info
, search_start
);
352 if (shint
&& !shint
->ro
&& block_group_bits(shint
, data
)) {
353 used
= btrfs_block_group_used(&shint
->item
);
354 if (used
+ shint
->pinned
<
355 div_factor(shint
->key
.offset
, factor
)) {
360 if (hint
&& !hint
->ro
&& block_group_bits(hint
, data
)) {
361 used
= btrfs_block_group_used(&hint
->item
);
362 if (used
+ hint
->pinned
<
363 div_factor(hint
->key
.offset
, factor
)) {
366 last
= hint
->key
.objectid
+ hint
->key
.offset
;
370 hint_last
= max(hint
->key
.objectid
, search_start
);
372 hint_last
= search_start
;
378 ret
= find_first_extent_bit(block_group_cache
, last
,
383 ret
= get_state_private(block_group_cache
, start
, &ptr
);
387 cache
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
388 last
= cache
->key
.objectid
+ cache
->key
.offset
;
389 used
= btrfs_block_group_used(&cache
->item
);
391 if (!cache
->ro
&& block_group_bits(cache
, data
)) {
393 free_check
= cache
->key
.offset
;
395 free_check
= div_factor(cache
->key
.offset
,
398 if (used
+ cache
->pinned
< free_check
) {
415 * Back reference rules. Back refs have three main goals:
417 * 1) differentiate between all holders of references to an extent so that
418 * when a reference is dropped we can make sure it was a valid reference
419 * before freeing the extent.
421 * 2) Provide enough information to quickly find the holders of an extent
422 * if we notice a given block is corrupted or bad.
424 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
425 * maintenance. This is actually the same as #2, but with a slightly
426 * different use case.
428 * File extents can be referenced by:
430 * - multiple snapshots, subvolumes, or different generations in one subvol
431 * - different files inside a single subvolume
432 * - different offsets inside a file (bookend extents in file.c)
434 * The extent ref structure has fields for:
436 * - Objectid of the subvolume root
437 * - Generation number of the tree holding the reference
438 * - objectid of the file holding the reference
439 * - offset in the file corresponding to the key holding the reference
440 * - number of references held by the parent node (always 1 for tree blocks)
442 * Btree leaf may hold multiple references to a file extent. In most cases,
443 * these references are from same file and the corresponding offsets inside
444 * the file are close together. So inode objectid and offset in file are
445 * just hints; they indicate where in the btree the references
446 * can be found and when we can stop searching.
448 * When a file extent is allocated the fields are filled in:
449 * (root_key.objectid, trans->transid, inode objectid, offset in file, 1)
451 * When a leaf is cow'd new references are added for every file extent found
452 * in the leaf. It looks similar to the create case, but trans->transid will
453 * be different when the block is cow'd.
455 * (root_key.objectid, trans->transid, inode objectid, offset in file,
456 * number of references in the leaf)
458 * Because inode objectid and offset in file are just hints, they are not
459 * used when backrefs are deleted. When a file extent is removed either
460 * during snapshot deletion or file truncation, we find the corresponding
461 * back reference and check the following fields.
463 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf))
465 * Btree extents can be referenced by:
467 * - Different subvolumes
468 * - Different generations of the same subvolume
470 * When a tree block is created, back references are inserted:
472 * (root->root_key.objectid, trans->transid, level, 0, 1)
474 * When a tree block is cow'd, new back references are added for all the
475 * blocks it points to. If the tree block isn't in reference counted root,
476 * the old back references are removed. These new back references are of
477 * the form (trans->transid will have increased since creation):
479 * (root->root_key.objectid, trans->transid, level, 0, 1)
481 * When a backref is in deleting, the following fields are checked:
483 * if backref was for a tree root:
484 * (btrfs_header_owner(itself), btrfs_header_generation(itself))
486 * (btrfs_header_owner(parent), btrfs_header_generation(parent))
488 * Back Reference Key composing:
490 * The key objectid corresponds to the first byte in the extent, the key
491 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
492 * byte of the parent extent. If an extent is a tree root, the key offset is set
493 * to the key objectid.
496 static int noinline
lookup_extent_backref(struct btrfs_trans_handle
*trans
,
497 struct btrfs_root
*root
,
498 struct btrfs_path
*path
,
499 u64 bytenr
, u64 parent
,
500 u64 ref_root
, u64 ref_generation
,
501 u64 owner_objectid
, int del
)
503 struct btrfs_key key
;
504 struct btrfs_extent_ref
*ref
;
505 struct extent_buffer
*leaf
;
509 key
.objectid
= bytenr
;
510 key
.type
= BTRFS_EXTENT_REF_KEY
;
513 ret
= btrfs_search_slot(trans
, root
, &key
, path
, del
? -1 : 0, 1);
521 leaf
= path
->nodes
[0];
522 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_ref
);
523 ref_objectid
= btrfs_ref_objectid(leaf
, ref
);
524 if (btrfs_ref_root(leaf
, ref
) != ref_root
||
525 btrfs_ref_generation(leaf
, ref
) != ref_generation
||
526 (ref_objectid
!= owner_objectid
&&
527 ref_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
)) {
537 static int noinline
insert_extent_backref(struct btrfs_trans_handle
*trans
,
538 struct btrfs_root
*root
,
539 struct btrfs_path
*path
,
540 u64 bytenr
, u64 parent
,
541 u64 ref_root
, u64 ref_generation
,
544 struct btrfs_key key
;
545 struct extent_buffer
*leaf
;
546 struct btrfs_extent_ref
*ref
;
550 key
.objectid
= bytenr
;
551 key
.type
= BTRFS_EXTENT_REF_KEY
;
554 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
, sizeof(*ref
));
556 leaf
= path
->nodes
[0];
557 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
558 struct btrfs_extent_ref
);
559 btrfs_set_ref_root(leaf
, ref
, ref_root
);
560 btrfs_set_ref_generation(leaf
, ref
, ref_generation
);
561 btrfs_set_ref_objectid(leaf
, ref
, owner_objectid
);
562 btrfs_set_ref_num_refs(leaf
, ref
, 1);
563 } else if (ret
== -EEXIST
) {
565 BUG_ON(owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
);
566 leaf
= path
->nodes
[0];
567 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
568 struct btrfs_extent_ref
);
569 if (btrfs_ref_root(leaf
, ref
) != ref_root
||
570 btrfs_ref_generation(leaf
, ref
) != ref_generation
) {
576 num_refs
= btrfs_ref_num_refs(leaf
, ref
);
577 BUG_ON(num_refs
== 0);
578 btrfs_set_ref_num_refs(leaf
, ref
, num_refs
+ 1);
580 existing_owner
= btrfs_ref_objectid(leaf
, ref
);
581 if (existing_owner
!= owner_objectid
&&
582 existing_owner
!= BTRFS_MULTIPLE_OBJECTIDS
) {
583 btrfs_set_ref_objectid(leaf
, ref
,
584 BTRFS_MULTIPLE_OBJECTIDS
);
590 btrfs_mark_buffer_dirty(path
->nodes
[0]);
592 btrfs_release_path(root
, path
);
596 static int noinline
remove_extent_backref(struct btrfs_trans_handle
*trans
,
597 struct btrfs_root
*root
,
598 struct btrfs_path
*path
)
600 struct extent_buffer
*leaf
;
601 struct btrfs_extent_ref
*ref
;
605 leaf
= path
->nodes
[0];
606 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_ref
);
607 num_refs
= btrfs_ref_num_refs(leaf
, ref
);
608 BUG_ON(num_refs
== 0);
611 ret
= btrfs_del_item(trans
, root
, path
);
613 btrfs_set_ref_num_refs(leaf
, ref
, num_refs
);
614 btrfs_mark_buffer_dirty(leaf
);
616 btrfs_release_path(root
, path
);
620 static int __btrfs_update_extent_ref(struct btrfs_trans_handle
*trans
,
621 struct btrfs_root
*root
, u64 bytenr
,
622 u64 orig_parent
, u64 parent
,
623 u64 orig_root
, u64 ref_root
,
624 u64 orig_generation
, u64 ref_generation
,
628 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
629 struct btrfs_path
*path
;
631 if (root
== root
->fs_info
->extent_root
) {
632 struct pending_extent_op
*extent_op
;
635 BUG_ON(owner_objectid
>= BTRFS_MAX_LEVEL
);
636 num_bytes
= btrfs_level_size(root
, (int)owner_objectid
);
637 if (test_range_bit(&root
->fs_info
->extent_ins
, bytenr
,
638 bytenr
+ num_bytes
- 1, EXTENT_LOCKED
, 0)) {
640 ret
= get_state_private(&root
->fs_info
->extent_ins
,
643 extent_op
= (struct pending_extent_op
*)
645 BUG_ON(extent_op
->parent
!= orig_parent
);
646 BUG_ON(extent_op
->generation
!= orig_generation
);
647 extent_op
->parent
= parent
;
648 extent_op
->generation
= ref_generation
;
650 extent_op
= kmalloc(sizeof(*extent_op
), GFP_NOFS
);
653 extent_op
->type
= PENDING_BACKREF_UPDATE
;
654 extent_op
->bytenr
= bytenr
;
655 extent_op
->num_bytes
= num_bytes
;
656 extent_op
->parent
= parent
;
657 extent_op
->orig_parent
= orig_parent
;
658 extent_op
->generation
= ref_generation
;
659 extent_op
->orig_generation
= orig_generation
;
660 extent_op
->level
= (int)owner_objectid
;
662 set_extent_bits(&root
->fs_info
->extent_ins
,
663 bytenr
, bytenr
+ num_bytes
- 1,
664 EXTENT_LOCKED
, GFP_NOFS
);
665 set_state_private(&root
->fs_info
->extent_ins
,
666 bytenr
, (unsigned long)extent_op
);
671 path
= btrfs_alloc_path();
674 ret
= lookup_extent_backref(trans
, extent_root
, path
,
675 bytenr
, orig_parent
, orig_root
,
676 orig_generation
, owner_objectid
, 1);
679 ret
= remove_extent_backref(trans
, extent_root
, path
);
682 ret
= insert_extent_backref(trans
, extent_root
, path
, bytenr
,
683 parent
, ref_root
, ref_generation
,
686 finish_current_insert(trans
, extent_root
);
687 del_pending_extents(trans
, extent_root
);
689 btrfs_free_path(path
);
693 int btrfs_update_extent_ref(struct btrfs_trans_handle
*trans
,
694 struct btrfs_root
*root
, u64 bytenr
,
695 u64 orig_parent
, u64 parent
,
696 u64 ref_root
, u64 ref_generation
,
700 if (ref_root
== BTRFS_TREE_LOG_OBJECTID
&&
701 owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
)
703 maybe_lock_mutex(root
);
704 ret
= __btrfs_update_extent_ref(trans
, root
, bytenr
, orig_parent
,
705 parent
, ref_root
, ref_root
,
706 ref_generation
, ref_generation
,
708 maybe_unlock_mutex(root
);
712 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle
*trans
,
713 struct btrfs_root
*root
, u64 bytenr
,
714 u64 orig_parent
, u64 parent
,
715 u64 orig_root
, u64 ref_root
,
716 u64 orig_generation
, u64 ref_generation
,
719 struct btrfs_path
*path
;
721 struct btrfs_key key
;
722 struct extent_buffer
*l
;
723 struct btrfs_extent_item
*item
;
726 path
= btrfs_alloc_path();
731 key
.objectid
= bytenr
;
732 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
733 key
.offset
= (u64
)-1;
735 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
739 BUG_ON(ret
== 0 || path
->slots
[0] == 0);
744 btrfs_item_key_to_cpu(l
, &key
, path
->slots
[0]);
745 BUG_ON(key
.objectid
!= bytenr
);
746 BUG_ON(key
.type
!= BTRFS_EXTENT_ITEM_KEY
);
748 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
749 refs
= btrfs_extent_refs(l
, item
);
750 btrfs_set_extent_refs(l
, item
, refs
+ 1);
751 btrfs_mark_buffer_dirty(path
->nodes
[0]);
753 btrfs_release_path(root
->fs_info
->extent_root
, path
);
756 ret
= insert_extent_backref(trans
, root
->fs_info
->extent_root
,
757 path
, bytenr
, parent
,
758 ref_root
, ref_generation
,
761 finish_current_insert(trans
, root
->fs_info
->extent_root
);
762 del_pending_extents(trans
, root
->fs_info
->extent_root
);
764 btrfs_free_path(path
);
768 int btrfs_inc_extent_ref(struct btrfs_trans_handle
*trans
,
769 struct btrfs_root
*root
,
770 u64 bytenr
, u64 num_bytes
, u64 parent
,
771 u64 ref_root
, u64 ref_generation
,
775 if (ref_root
== BTRFS_TREE_LOG_OBJECTID
&&
776 owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
)
778 maybe_lock_mutex(root
);
779 ret
= __btrfs_inc_extent_ref(trans
, root
, bytenr
, 0, parent
,
780 0, ref_root
, 0, ref_generation
,
782 maybe_unlock_mutex(root
);
786 int btrfs_extent_post_op(struct btrfs_trans_handle
*trans
,
787 struct btrfs_root
*root
)
789 finish_current_insert(trans
, root
->fs_info
->extent_root
);
790 del_pending_extents(trans
, root
->fs_info
->extent_root
);
794 int lookup_extent_ref(struct btrfs_trans_handle
*trans
,
795 struct btrfs_root
*root
, u64 bytenr
,
796 u64 num_bytes
, u32
*refs
)
798 struct btrfs_path
*path
;
800 struct btrfs_key key
;
801 struct extent_buffer
*l
;
802 struct btrfs_extent_item
*item
;
804 WARN_ON(num_bytes
< root
->sectorsize
);
805 path
= btrfs_alloc_path();
807 key
.objectid
= bytenr
;
808 key
.offset
= num_bytes
;
809 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
810 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
815 btrfs_print_leaf(root
, path
->nodes
[0]);
816 printk("failed to find block number %Lu\n", bytenr
);
820 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
821 *refs
= btrfs_extent_refs(l
, item
);
823 btrfs_free_path(path
);
827 int btrfs_inc_ref(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
828 struct extent_buffer
*orig_buf
, struct extent_buffer
*buf
,
837 u32 nr_file_extents
= 0;
838 struct btrfs_key key
;
839 struct btrfs_file_extent_item
*fi
;
844 int (*process_func
)(struct btrfs_trans_handle
*, struct btrfs_root
*,
845 u64
, u64
, u64
, u64
, u64
, u64
, u64
, u64
);
847 ref_root
= btrfs_header_owner(buf
);
848 ref_generation
= btrfs_header_generation(buf
);
849 orig_root
= btrfs_header_owner(orig_buf
);
850 orig_generation
= btrfs_header_generation(orig_buf
);
852 nritems
= btrfs_header_nritems(buf
);
853 level
= btrfs_header_level(buf
);
855 if (root
->ref_cows
) {
856 process_func
= __btrfs_inc_extent_ref
;
859 root
->root_key
.objectid
!= BTRFS_TREE_LOG_OBJECTID
)
861 process_func
= __btrfs_update_extent_ref
;
864 for (i
= 0; i
< nritems
; i
++) {
867 btrfs_item_key_to_cpu(buf
, &key
, i
);
868 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
870 fi
= btrfs_item_ptr(buf
, i
,
871 struct btrfs_file_extent_item
);
872 if (btrfs_file_extent_type(buf
, fi
) ==
873 BTRFS_FILE_EXTENT_INLINE
)
875 bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
881 maybe_lock_mutex(root
);
882 ret
= process_func(trans
, root
, bytenr
,
883 orig_buf
->start
, buf
->start
,
885 orig_generation
, ref_generation
,
887 maybe_unlock_mutex(root
);
895 bytenr
= btrfs_node_blockptr(buf
, i
);
896 maybe_lock_mutex(root
);
897 ret
= process_func(trans
, root
, bytenr
,
898 orig_buf
->start
, buf
->start
,
900 orig_generation
, ref_generation
,
902 maybe_unlock_mutex(root
);
913 *nr_extents
= nr_file_extents
;
915 *nr_extents
= nritems
;
921 for (i
=0; i
< faili
; i
++) {
924 btrfs_item_key_to_cpu(buf
, &key
, i
);
925 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
927 fi
= btrfs_item_ptr(buf
, i
,
928 struct btrfs_file_extent_item
);
929 if (btrfs_file_extent_type(buf
, fi
) ==
930 BTRFS_FILE_EXTENT_INLINE
)
932 disk_bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
933 if (disk_bytenr
== 0)
935 err
= btrfs_free_extent(trans
, root
, disk_bytenr
,
936 btrfs_file_extent_disk_num_bytes(buf
,
940 bytenr
= btrfs_node_blockptr(buf
, i
);
941 err
= btrfs_free_extent(trans
, root
, bytenr
,
942 btrfs_level_size(root
, level
- 1), 0);
950 int btrfs_update_ref(struct btrfs_trans_handle
*trans
,
951 struct btrfs_root
*root
, struct extent_buffer
*orig_buf
,
952 struct extent_buffer
*buf
, int start_slot
, int nr
)
960 struct btrfs_key key
;
961 struct btrfs_file_extent_item
*fi
;
967 BUG_ON(start_slot
< 0);
968 BUG_ON(start_slot
+ nr
> btrfs_header_nritems(buf
));
970 ref_root
= btrfs_header_owner(buf
);
971 ref_generation
= btrfs_header_generation(buf
);
972 orig_root
= btrfs_header_owner(orig_buf
);
973 orig_generation
= btrfs_header_generation(orig_buf
);
974 level
= btrfs_header_level(buf
);
976 if (!root
->ref_cows
) {
978 root
->root_key
.objectid
!= BTRFS_TREE_LOG_OBJECTID
)
982 for (i
= 0, slot
= start_slot
; i
< nr
; i
++, slot
++) {
985 btrfs_item_key_to_cpu(buf
, &key
, slot
);
986 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
988 fi
= btrfs_item_ptr(buf
, slot
,
989 struct btrfs_file_extent_item
);
990 if (btrfs_file_extent_type(buf
, fi
) ==
991 BTRFS_FILE_EXTENT_INLINE
)
993 bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
997 maybe_lock_mutex(root
);
998 ret
= __btrfs_update_extent_ref(trans
, root
, bytenr
,
999 orig_buf
->start
, buf
->start
,
1000 orig_root
, ref_root
,
1001 orig_generation
, ref_generation
,
1003 maybe_unlock_mutex(root
);
1007 bytenr
= btrfs_node_blockptr(buf
, slot
);
1008 maybe_lock_mutex(root
);
1009 ret
= __btrfs_update_extent_ref(trans
, root
, bytenr
,
1010 orig_buf
->start
, buf
->start
,
1011 orig_root
, ref_root
,
1012 orig_generation
, ref_generation
,
1014 maybe_unlock_mutex(root
);
1025 static int write_one_cache_group(struct btrfs_trans_handle
*trans
,
1026 struct btrfs_root
*root
,
1027 struct btrfs_path
*path
,
1028 struct btrfs_block_group_cache
*cache
)
1032 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
1034 struct extent_buffer
*leaf
;
1036 ret
= btrfs_search_slot(trans
, extent_root
, &cache
->key
, path
, 0, 1);
1041 leaf
= path
->nodes
[0];
1042 bi
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
1043 write_extent_buffer(leaf
, &cache
->item
, bi
, sizeof(cache
->item
));
1044 btrfs_mark_buffer_dirty(leaf
);
1045 btrfs_release_path(extent_root
, path
);
1047 finish_current_insert(trans
, extent_root
);
1048 pending_ret
= del_pending_extents(trans
, extent_root
);
1057 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle
*trans
,
1058 struct btrfs_root
*root
)
1060 struct extent_io_tree
*block_group_cache
;
1061 struct btrfs_block_group_cache
*cache
;
1065 struct btrfs_path
*path
;
1071 block_group_cache
= &root
->fs_info
->block_group_cache
;
1072 path
= btrfs_alloc_path();
1077 ret
= find_first_extent_bit(block_group_cache
, last
,
1078 &start
, &end
, BLOCK_GROUP_DIRTY
);
1083 ret
= get_state_private(block_group_cache
, start
, &ptr
);
1086 cache
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
1087 err
= write_one_cache_group(trans
, root
,
1090 * if we fail to write the cache group, we want
1091 * to keep it marked dirty in hopes that a later
1098 clear_extent_bits(block_group_cache
, start
, end
,
1099 BLOCK_GROUP_DIRTY
, GFP_NOFS
);
1101 btrfs_free_path(path
);
1105 static struct btrfs_space_info
*__find_space_info(struct btrfs_fs_info
*info
,
1108 struct list_head
*head
= &info
->space_info
;
1109 struct list_head
*cur
;
1110 struct btrfs_space_info
*found
;
1111 list_for_each(cur
, head
) {
1112 found
= list_entry(cur
, struct btrfs_space_info
, list
);
1113 if (found
->flags
== flags
)
1120 static int update_space_info(struct btrfs_fs_info
*info
, u64 flags
,
1121 u64 total_bytes
, u64 bytes_used
,
1122 struct btrfs_space_info
**space_info
)
1124 struct btrfs_space_info
*found
;
1126 found
= __find_space_info(info
, flags
);
1128 found
->total_bytes
+= total_bytes
;
1129 found
->bytes_used
+= bytes_used
;
1130 WARN_ON(found
->total_bytes
< found
->bytes_used
);
1131 *space_info
= found
;
1134 found
= kmalloc(sizeof(*found
), GFP_NOFS
);
1138 list_add(&found
->list
, &info
->space_info
);
1139 found
->flags
= flags
;
1140 found
->total_bytes
= total_bytes
;
1141 found
->bytes_used
= bytes_used
;
1142 found
->bytes_pinned
= 0;
1144 *space_info
= found
;
1149 static void set_avail_alloc_bits(struct btrfs_fs_info
*fs_info
, u64 flags
)
1151 u64 extra_flags
= flags
& (BTRFS_BLOCK_GROUP_RAID0
|
1152 BTRFS_BLOCK_GROUP_RAID1
|
1153 BTRFS_BLOCK_GROUP_DUP
);
1155 if (flags
& BTRFS_BLOCK_GROUP_DATA
)
1156 fs_info
->avail_data_alloc_bits
|= extra_flags
;
1157 if (flags
& BTRFS_BLOCK_GROUP_METADATA
)
1158 fs_info
->avail_metadata_alloc_bits
|= extra_flags
;
1159 if (flags
& BTRFS_BLOCK_GROUP_SYSTEM
)
1160 fs_info
->avail_system_alloc_bits
|= extra_flags
;
1164 static int do_chunk_alloc(struct btrfs_trans_handle
*trans
,
1165 struct btrfs_root
*extent_root
, u64 alloc_bytes
,
1168 struct btrfs_space_info
*space_info
;
1174 space_info
= __find_space_info(extent_root
->fs_info
, flags
);
1176 ret
= update_space_info(extent_root
->fs_info
, flags
,
1180 BUG_ON(!space_info
);
1182 if (space_info
->full
)
1185 thresh
= div_factor(space_info
->total_bytes
, 7);
1186 if ((space_info
->bytes_used
+ space_info
->bytes_pinned
+ alloc_bytes
) <
1190 ret
= btrfs_alloc_chunk(trans
, extent_root
, &start
, &num_bytes
, flags
);
1191 if (ret
== -ENOSPC
) {
1192 space_info
->full
= 1;
1198 ret
= btrfs_make_block_group(trans
, extent_root
, 0, flags
,
1199 BTRFS_FIRST_CHUNK_TREE_OBJECTID
, start
, num_bytes
);
1204 static int update_block_group(struct btrfs_trans_handle
*trans
,
1205 struct btrfs_root
*root
,
1206 u64 bytenr
, u64 num_bytes
, int alloc
,
1209 struct btrfs_block_group_cache
*cache
;
1210 struct btrfs_fs_info
*info
= root
->fs_info
;
1211 u64 total
= num_bytes
;
1218 cache
= btrfs_lookup_block_group(info
, bytenr
);
1222 byte_in_group
= bytenr
- cache
->key
.objectid
;
1223 WARN_ON(byte_in_group
> cache
->key
.offset
);
1224 start
= cache
->key
.objectid
;
1225 end
= start
+ cache
->key
.offset
- 1;
1226 set_extent_bits(&info
->block_group_cache
, start
, end
,
1227 BLOCK_GROUP_DIRTY
, GFP_NOFS
);
1229 old_val
= btrfs_block_group_used(&cache
->item
);
1230 num_bytes
= min(total
, cache
->key
.offset
- byte_in_group
);
1232 old_val
+= num_bytes
;
1233 cache
->space_info
->bytes_used
+= num_bytes
;
1235 old_val
-= num_bytes
;
1236 cache
->space_info
->bytes_used
-= num_bytes
;
1238 set_extent_dirty(&info
->free_space_cache
,
1239 bytenr
, bytenr
+ num_bytes
- 1,
1243 btrfs_set_block_group_used(&cache
->item
, old_val
);
1245 bytenr
+= num_bytes
;
1250 static int update_pinned_extents(struct btrfs_root
*root
,
1251 u64 bytenr
, u64 num
, int pin
)
1254 struct btrfs_block_group_cache
*cache
;
1255 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1258 set_extent_dirty(&fs_info
->pinned_extents
,
1259 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
1261 clear_extent_dirty(&fs_info
->pinned_extents
,
1262 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
1265 cache
= btrfs_lookup_block_group(fs_info
, bytenr
);
1267 len
= min(num
, cache
->key
.offset
-
1268 (bytenr
- cache
->key
.objectid
));
1270 cache
->pinned
+= len
;
1271 cache
->space_info
->bytes_pinned
+= len
;
1272 fs_info
->total_pinned
+= len
;
1274 cache
->pinned
-= len
;
1275 cache
->space_info
->bytes_pinned
-= len
;
1276 fs_info
->total_pinned
-= len
;
1284 int btrfs_copy_pinned(struct btrfs_root
*root
, struct extent_io_tree
*copy
)
1289 struct extent_io_tree
*pinned_extents
= &root
->fs_info
->pinned_extents
;
1293 ret
= find_first_extent_bit(pinned_extents
, last
,
1294 &start
, &end
, EXTENT_DIRTY
);
1297 set_extent_dirty(copy
, start
, end
, GFP_NOFS
);
1303 int btrfs_finish_extent_commit(struct btrfs_trans_handle
*trans
,
1304 struct btrfs_root
*root
,
1305 struct extent_io_tree
*unpin
)
1310 struct extent_io_tree
*free_space_cache
;
1311 free_space_cache
= &root
->fs_info
->free_space_cache
;
1314 ret
= find_first_extent_bit(unpin
, 0, &start
, &end
,
1318 update_pinned_extents(root
, start
, end
+ 1 - start
, 0);
1319 clear_extent_dirty(unpin
, start
, end
, GFP_NOFS
);
1320 set_extent_dirty(free_space_cache
, start
, end
, GFP_NOFS
);
1325 static int finish_current_insert(struct btrfs_trans_handle
*trans
,
1326 struct btrfs_root
*extent_root
)
1331 struct btrfs_fs_info
*info
= extent_root
->fs_info
;
1332 struct btrfs_path
*path
;
1333 struct btrfs_extent_ref
*ref
;
1334 struct pending_extent_op
*extent_op
;
1335 struct btrfs_key key
;
1336 struct btrfs_extent_item extent_item
;
1340 btrfs_set_stack_extent_refs(&extent_item
, 1);
1341 path
= btrfs_alloc_path();
1344 ret
= find_first_extent_bit(&info
->extent_ins
, 0, &start
,
1345 &end
, EXTENT_LOCKED
);
1349 ret
= get_state_private(&info
->extent_ins
, start
, &priv
);
1351 extent_op
= (struct pending_extent_op
*)(unsigned long)priv
;
1353 if (extent_op
->type
== PENDING_EXTENT_INSERT
) {
1354 key
.objectid
= start
;
1355 key
.offset
= end
+ 1 - start
;
1356 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
1357 err
= btrfs_insert_item(trans
, extent_root
, &key
,
1358 &extent_item
, sizeof(extent_item
));
1361 clear_extent_bits(&info
->extent_ins
, start
, end
,
1362 EXTENT_LOCKED
, GFP_NOFS
);
1364 err
= insert_extent_backref(trans
, extent_root
, path
,
1365 start
, extent_op
->parent
,
1366 extent_root
->root_key
.objectid
,
1367 extent_op
->generation
,
1370 } else if (extent_op
->type
== PENDING_BACKREF_UPDATE
) {
1371 err
= lookup_extent_backref(trans
, extent_root
, path
,
1372 start
, extent_op
->orig_parent
,
1373 extent_root
->root_key
.objectid
,
1374 extent_op
->orig_generation
,
1375 extent_op
->level
, 0);
1378 clear_extent_bits(&info
->extent_ins
, start
, end
,
1379 EXTENT_LOCKED
, GFP_NOFS
);
1381 key
.objectid
= start
;
1382 key
.offset
= extent_op
->parent
;
1383 key
.type
= BTRFS_EXTENT_REF_KEY
;
1384 err
= btrfs_set_item_key_safe(trans
, extent_root
, path
,
1387 ref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1388 struct btrfs_extent_ref
);
1389 btrfs_set_ref_generation(path
->nodes
[0], ref
,
1390 extent_op
->generation
);
1391 btrfs_mark_buffer_dirty(path
->nodes
[0]);
1392 btrfs_release_path(extent_root
, path
);
1398 btrfs_free_path(path
);
1402 static int pin_down_bytes(struct btrfs_trans_handle
*trans
,
1403 struct btrfs_root
*root
,
1404 u64 bytenr
, u64 num_bytes
, int is_data
)
1407 struct extent_buffer
*buf
;
1412 buf
= btrfs_find_tree_block(root
, bytenr
, num_bytes
);
1416 /* we can reuse a block if it hasn't been written
1417 * and it is from this transaction. We can't
1418 * reuse anything from the tree log root because
1419 * it has tiny sub-transactions.
1421 if (btrfs_buffer_uptodate(buf
, 0)) {
1422 u64 header_owner
= btrfs_header_owner(buf
);
1423 u64 header_transid
= btrfs_header_generation(buf
);
1424 if (header_owner
!= BTRFS_TREE_LOG_OBJECTID
&&
1425 header_owner
!= BTRFS_TREE_RELOC_OBJECTID
&&
1426 header_transid
== trans
->transid
&&
1427 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
)) {
1428 clean_tree_block(NULL
, root
, buf
);
1429 free_extent_buffer(buf
);
1433 free_extent_buffer(buf
);
1435 update_pinned_extents(root
, bytenr
, num_bytes
, 1);
1442 * remove an extent from the root, returns 0 on success
1444 static int __free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
1445 *root
, u64 bytenr
, u64 num_bytes
, u64 parent
,
1446 u64 root_objectid
, u64 ref_generation
,
1447 u64 owner_objectid
, int pin
, int mark_free
)
1449 struct btrfs_path
*path
;
1450 struct btrfs_key key
;
1451 struct btrfs_fs_info
*info
= root
->fs_info
;
1452 struct btrfs_extent_ops
*ops
= info
->extent_ops
;
1453 struct btrfs_root
*extent_root
= info
->extent_root
;
1454 struct extent_buffer
*leaf
;
1456 int extent_slot
= 0;
1457 int found_extent
= 0;
1459 struct btrfs_extent_item
*ei
;
1462 key
.objectid
= bytenr
;
1463 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
1464 key
.offset
= num_bytes
;
1466 path
= btrfs_alloc_path();
1470 ret
= lookup_extent_backref(trans
, extent_root
, path
,
1471 bytenr
, parent
, root_objectid
,
1472 ref_generation
, owner_objectid
, 1);
1474 struct btrfs_key found_key
;
1475 extent_slot
= path
->slots
[0];
1476 while(extent_slot
> 0) {
1478 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
1480 if (found_key
.objectid
!= bytenr
)
1482 if (found_key
.type
== BTRFS_EXTENT_ITEM_KEY
&&
1483 found_key
.offset
== num_bytes
) {
1487 if (path
->slots
[0] - extent_slot
> 5)
1490 if (!found_extent
) {
1491 ret
= remove_extent_backref(trans
, extent_root
, path
);
1493 btrfs_release_path(extent_root
, path
);
1494 ret
= btrfs_search_slot(trans
, extent_root
,
1497 extent_slot
= path
->slots
[0];
1500 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
1501 printk("Unable to find ref byte nr %llu root %llu "
1502 " gen %llu owner %llu\n",
1503 (unsigned long long)bytenr
,
1504 (unsigned long long)root_objectid
,
1505 (unsigned long long)ref_generation
,
1506 (unsigned long long)owner_objectid
);
1510 leaf
= path
->nodes
[0];
1511 ei
= btrfs_item_ptr(leaf
, extent_slot
,
1512 struct btrfs_extent_item
);
1513 refs
= btrfs_extent_refs(leaf
, ei
);
1516 btrfs_set_extent_refs(leaf
, ei
, refs
);
1518 btrfs_mark_buffer_dirty(leaf
);
1520 if (refs
== 0 && found_extent
&& path
->slots
[0] == extent_slot
+ 1) {
1521 struct btrfs_extent_ref
*ref
;
1522 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
1523 struct btrfs_extent_ref
);
1524 BUG_ON(btrfs_ref_num_refs(leaf
, ref
) != 1);
1525 /* if the back ref and the extent are next to each other
1526 * they get deleted below in one shot
1528 path
->slots
[0] = extent_slot
;
1530 } else if (found_extent
) {
1531 /* otherwise delete the extent back ref */
1532 ret
= remove_extent_backref(trans
, extent_root
, path
);
1534 /* if refs are 0, we need to setup the path for deletion */
1536 btrfs_release_path(extent_root
, path
);
1537 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
,
1550 ret
= pin_down_bytes(trans
, root
, bytenr
, num_bytes
, 0);
1556 /* block accounting for super block */
1557 super_used
= btrfs_super_bytes_used(&info
->super_copy
);
1558 btrfs_set_super_bytes_used(&info
->super_copy
,
1559 super_used
- num_bytes
);
1561 /* block accounting for root item */
1562 root_used
= btrfs_root_used(&root
->root_item
);
1563 btrfs_set_root_used(&root
->root_item
,
1564 root_used
- num_bytes
);
1565 ret
= btrfs_del_items(trans
, extent_root
, path
, path
->slots
[0],
1570 if (ops
&& ops
->free_extent
)
1571 ops
->free_extent(root
, bytenr
, num_bytes
);
1573 ret
= update_block_group(trans
, root
, bytenr
, num_bytes
, 0,
1577 btrfs_free_path(path
);
1578 finish_current_insert(trans
, extent_root
);
1583 * find all the blocks marked as pending in the radix tree and remove
1584 * them from the extent map
1586 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
1587 btrfs_root
*extent_root
)
1595 struct extent_io_tree
*pending_del
;
1596 struct extent_io_tree
*extent_ins
;
1597 struct pending_extent_op
*extent_op
;
1599 extent_ins
= &extent_root
->fs_info
->extent_ins
;
1600 pending_del
= &extent_root
->fs_info
->pending_del
;
1603 ret
= find_first_extent_bit(pending_del
, 0, &start
, &end
,
1608 ret
= get_state_private(pending_del
, start
, &priv
);
1610 extent_op
= (struct pending_extent_op
*)(unsigned long)priv
;
1612 clear_extent_bits(pending_del
, start
, end
, EXTENT_LOCKED
,
1615 ret
= pin_down_bytes(trans
, extent_root
, start
,
1616 end
+ 1 - start
, 0);
1617 mark_free
= ret
> 0;
1618 if (!test_range_bit(extent_ins
, start
, end
,
1619 EXTENT_LOCKED
, 0)) {
1621 ret
= __free_extent(trans
, extent_root
,
1622 start
, end
+ 1 - start
,
1623 extent_op
->orig_parent
,
1624 extent_root
->root_key
.objectid
,
1625 extent_op
->orig_generation
,
1626 extent_op
->level
, 0, mark_free
);
1630 ret
= get_state_private(extent_ins
, start
, &priv
);
1632 extent_op
= (struct pending_extent_op
*)
1633 (unsigned long)priv
;
1635 clear_extent_bits(extent_ins
, start
, end
,
1636 EXTENT_LOCKED
, GFP_NOFS
);
1638 if (extent_op
->type
== PENDING_BACKREF_UPDATE
)
1641 ret
= update_block_group(trans
, extent_root
, start
,
1642 end
+ 1 - start
, 0, mark_free
);
1653 * remove an extent from the root, returns 0 on success
1655 int btrfs_free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
1656 *root
, u64 bytenr
, u64 num_bytes
, u64 parent
,
1657 u64 root_objectid
, u64 ref_generation
,
1658 u64 owner_objectid
, int pin
)
1660 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
1664 WARN_ON(num_bytes
< root
->sectorsize
);
1665 if (root
== extent_root
) {
1666 struct pending_extent_op
*extent_op
;
1668 extent_op
= kmalloc(sizeof(*extent_op
), GFP_NOFS
);
1671 extent_op
->type
= PENDING_EXTENT_DELETE
;
1672 extent_op
->bytenr
= bytenr
;
1673 extent_op
->num_bytes
= num_bytes
;
1674 extent_op
->parent
= parent
;
1675 extent_op
->orig_parent
= parent
;
1676 extent_op
->generation
= ref_generation
;
1677 extent_op
->orig_generation
= ref_generation
;
1678 extent_op
->level
= (int)owner_objectid
;
1680 set_extent_bits(&root
->fs_info
->pending_del
,
1681 bytenr
, bytenr
+ num_bytes
- 1,
1682 EXTENT_LOCKED
, GFP_NOFS
);
1683 set_state_private(&root
->fs_info
->pending_del
,
1684 bytenr
, (unsigned long)extent_op
);
1687 ret
= __free_extent(trans
, root
, bytenr
, num_bytes
, parent
,
1688 root_objectid
, ref_generation
,
1689 owner_objectid
, pin
, pin
== 0);
1690 pending_ret
= del_pending_extents(trans
, root
->fs_info
->extent_root
);
1691 return ret
? ret
: pending_ret
;
1694 static u64
stripe_align(struct btrfs_root
*root
, u64 val
)
1696 u64 mask
= ((u64
)root
->stripesize
- 1);
1697 u64 ret
= (val
+ mask
) & ~mask
;
1702 * walks the btree of allocated extents and find a hole of a given size.
1703 * The key ins is changed to record the hole:
1704 * ins->objectid == block start
1705 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1706 * ins->offset == number of blocks
1707 * Any available blocks before search_start are skipped.
1709 static int noinline
find_free_extent(struct btrfs_trans_handle
*trans
,
1710 struct btrfs_root
*orig_root
,
1711 u64 num_bytes
, u64 empty_size
,
1712 u64 search_start
, u64 search_end
,
1713 u64 hint_byte
, struct btrfs_key
*ins
,
1714 u64 exclude_start
, u64 exclude_nr
,
1718 u64 orig_search_start
= search_start
;
1719 struct btrfs_root
* root
= orig_root
->fs_info
->extent_root
;
1720 struct btrfs_fs_info
*info
= root
->fs_info
;
1721 u64 total_needed
= num_bytes
;
1722 struct btrfs_block_group_cache
*block_group
;
1726 WARN_ON(num_bytes
< root
->sectorsize
);
1727 btrfs_set_key_type(ins
, BTRFS_EXTENT_ITEM_KEY
);
1730 block_group
= btrfs_lookup_first_block_group(info
, hint_byte
);
1732 hint_byte
= search_start
;
1733 block_group
= btrfs_find_block_group(root
, block_group
,
1734 hint_byte
, data
, 1);
1736 block_group
= btrfs_find_block_group(root
,
1738 search_start
, data
, 1);
1741 total_needed
+= empty_size
;
1745 block_group
= btrfs_lookup_first_block_group(info
,
1748 block_group
= btrfs_lookup_first_block_group(info
,
1751 ret
= find_search_start(root
, &block_group
, &search_start
,
1752 total_needed
, data
);
1756 search_start
= stripe_align(root
, search_start
);
1757 ins
->objectid
= search_start
;
1758 ins
->offset
= num_bytes
;
1760 if (ins
->objectid
+ num_bytes
>
1761 block_group
->key
.objectid
+ block_group
->key
.offset
) {
1762 search_start
= block_group
->key
.objectid
+
1763 block_group
->key
.offset
;
1767 if (test_range_bit(&info
->extent_ins
, ins
->objectid
,
1768 ins
->objectid
+ num_bytes
-1, EXTENT_LOCKED
, 0)) {
1769 search_start
= ins
->objectid
+ num_bytes
;
1773 if (test_range_bit(&info
->pinned_extents
, ins
->objectid
,
1774 ins
->objectid
+ num_bytes
-1, EXTENT_DIRTY
, 0)) {
1775 search_start
= ins
->objectid
+ num_bytes
;
1779 if (exclude_nr
> 0 && (ins
->objectid
+ num_bytes
> exclude_start
&&
1780 ins
->objectid
< exclude_start
+ exclude_nr
)) {
1781 search_start
= exclude_start
+ exclude_nr
;
1785 if (!(data
& BTRFS_BLOCK_GROUP_DATA
)) {
1786 block_group
= btrfs_lookup_block_group(info
, ins
->objectid
);
1788 trans
->block_group
= block_group
;
1790 ins
->offset
= num_bytes
;
1794 block_group
= btrfs_lookup_first_block_group(info
, search_start
);
1796 search_start
= orig_search_start
;
1803 total_needed
-= empty_size
;
1809 block_group
= btrfs_find_block_group(root
, block_group
,
1810 search_start
, data
, 0);
1817 * finds a free extent and does all the dirty work required for allocation
1818 * returns the key for the extent through ins, and a tree buffer for
1819 * the first block of the extent through buf.
1821 * returns 0 if everything worked, non-zero otherwise.
1823 int btrfs_alloc_extent(struct btrfs_trans_handle
*trans
,
1824 struct btrfs_root
*root
,
1825 u64 num_bytes
, u64 parent
,
1826 u64 root_objectid
, u64 ref_generation
,
1827 u64 owner
, u64 empty_size
, u64 hint_byte
,
1828 u64 search_end
, struct btrfs_key
*ins
, int data
)
1832 u64 super_used
, root_used
;
1833 u64 search_start
= 0;
1836 struct btrfs_fs_info
*info
= root
->fs_info
;
1837 struct btrfs_root
*extent_root
= info
->extent_root
;
1838 struct btrfs_path
*path
;
1839 struct btrfs_extent_item
*extent_item
;
1840 struct btrfs_extent_ref
*ref
;
1841 struct btrfs_key keys
[2];
1843 if (info
->extent_ops
) {
1844 struct btrfs_extent_ops
*ops
= info
->extent_ops
;
1845 ret
= ops
->alloc_extent(root
, num_bytes
, hint_byte
, ins
);
1851 alloc_profile
= info
->avail_data_alloc_bits
&
1852 info
->data_alloc_profile
;
1853 data
= BTRFS_BLOCK_GROUP_DATA
| alloc_profile
;
1854 } else if ((info
->system_allocs
> 0 || root
== info
->chunk_root
) &&
1855 info
->system_allocs
>= 0) {
1856 alloc_profile
= info
->avail_system_alloc_bits
&
1857 info
->system_alloc_profile
;
1858 data
= BTRFS_BLOCK_GROUP_SYSTEM
| alloc_profile
;
1860 alloc_profile
= info
->avail_metadata_alloc_bits
&
1861 info
->metadata_alloc_profile
;
1862 data
= BTRFS_BLOCK_GROUP_METADATA
| alloc_profile
;
1865 if (root
->ref_cows
) {
1866 if (!(data
& BTRFS_BLOCK_GROUP_METADATA
)) {
1867 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
1869 BTRFS_BLOCK_GROUP_METADATA
);
1872 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
1873 num_bytes
+ 2 * 1024 * 1024, data
);
1877 WARN_ON(num_bytes
< root
->sectorsize
);
1878 ret
= find_free_extent(trans
, root
, num_bytes
, empty_size
,
1879 search_start
, search_end
, hint_byte
, ins
,
1880 trans
->alloc_exclude_start
,
1881 trans
->alloc_exclude_nr
, data
);
1888 parent
= ins
->objectid
;
1890 /* block accounting for super block */
1891 super_used
= btrfs_super_bytes_used(&info
->super_copy
);
1892 btrfs_set_super_bytes_used(&info
->super_copy
, super_used
+ num_bytes
);
1894 /* block accounting for root item */
1895 root_used
= btrfs_root_used(&root
->root_item
);
1896 btrfs_set_root_used(&root
->root_item
, root_used
+ num_bytes
);
1898 clear_extent_dirty(&root
->fs_info
->free_space_cache
,
1899 ins
->objectid
, ins
->objectid
+ ins
->offset
- 1,
1902 if (root
== extent_root
) {
1903 struct pending_extent_op
*extent_op
;
1905 extent_op
= kmalloc(sizeof(*extent_op
), GFP_NOFS
);
1908 extent_op
->type
= PENDING_EXTENT_INSERT
;
1909 extent_op
->bytenr
= ins
->objectid
;
1910 extent_op
->num_bytes
= ins
->offset
;
1911 extent_op
->parent
= parent
;
1912 extent_op
->orig_parent
= 0;
1913 extent_op
->generation
= ref_generation
;
1914 extent_op
->orig_generation
= 0;
1915 extent_op
->level
= (int)owner
;
1917 set_extent_bits(&root
->fs_info
->extent_ins
, ins
->objectid
,
1918 ins
->objectid
+ ins
->offset
- 1,
1919 EXTENT_LOCKED
, GFP_NOFS
);
1920 set_state_private(&root
->fs_info
->extent_ins
,
1921 ins
->objectid
, (unsigned long)extent_op
);
1925 WARN_ON(trans
->alloc_exclude_nr
);
1926 trans
->alloc_exclude_start
= ins
->objectid
;
1927 trans
->alloc_exclude_nr
= ins
->offset
;
1929 memcpy(&keys
[0], ins
, sizeof(*ins
));
1930 keys
[1].objectid
= ins
->objectid
;
1931 keys
[1].type
= BTRFS_EXTENT_REF_KEY
;
1932 keys
[1].offset
= parent
;
1933 sizes
[0] = sizeof(*extent_item
);
1934 sizes
[1] = sizeof(*ref
);
1936 path
= btrfs_alloc_path();
1939 ret
= btrfs_insert_empty_items(trans
, extent_root
, path
, keys
,
1943 extent_item
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1944 struct btrfs_extent_item
);
1945 btrfs_set_extent_refs(path
->nodes
[0], extent_item
, 1);
1946 ref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0] + 1,
1947 struct btrfs_extent_ref
);
1949 btrfs_set_ref_root(path
->nodes
[0], ref
, root_objectid
);
1950 btrfs_set_ref_generation(path
->nodes
[0], ref
, ref_generation
);
1951 btrfs_set_ref_objectid(path
->nodes
[0], ref
, owner
);
1952 btrfs_set_ref_num_refs(path
->nodes
[0], ref
, 1);
1954 btrfs_mark_buffer_dirty(path
->nodes
[0]);
1956 trans
->alloc_exclude_start
= 0;
1957 trans
->alloc_exclude_nr
= 0;
1958 btrfs_free_path(path
);
1959 finish_current_insert(trans
, extent_root
);
1960 pending_ret
= del_pending_extents(trans
, extent_root
);
1970 ret
= update_block_group(trans
, root
, ins
->objectid
, ins
->offset
, 1, 0);
1972 printk("update block group failed for %llu %llu\n",
1973 (unsigned long long)ins
->objectid
,
1974 (unsigned long long)ins
->offset
);
1981 * helper function to allocate a block for a given tree
1982 * returns the tree buffer or NULL.
1984 struct extent_buffer
*btrfs_alloc_free_block(struct btrfs_trans_handle
*trans
,
1985 struct btrfs_root
*root
,
1986 u32 blocksize
, u64 parent
,
1993 struct btrfs_key ins
;
1995 struct extent_buffer
*buf
;
1997 ret
= btrfs_alloc_extent(trans
, root
, blocksize
, parent
,
1998 root_objectid
, ref_generation
,
1999 level
, empty_size
, hint
,
2003 return ERR_PTR(ret
);
2005 buf
= btrfs_find_create_tree_block(root
, ins
.objectid
, blocksize
);
2008 parent
= ins
.objectid
;
2009 btrfs_free_extent(trans
, root
, ins
.objectid
, blocksize
,
2010 parent
, root
->root_key
.objectid
,
2011 ref_generation
, level
, 0);
2013 return ERR_PTR(-ENOMEM
);
2015 btrfs_set_buffer_uptodate(buf
);
2016 trans
->blocks_used
++;
2020 static int noinline
drop_leaf_ref(struct btrfs_trans_handle
*trans
,
2021 struct btrfs_root
*root
,
2022 struct extent_buffer
*leaf
)
2025 u64 leaf_generation
;
2026 struct btrfs_key key
;
2027 struct btrfs_file_extent_item
*fi
;
2032 BUG_ON(!btrfs_is_leaf(leaf
));
2033 nritems
= btrfs_header_nritems(leaf
);
2034 leaf_owner
= btrfs_header_owner(leaf
);
2035 leaf_generation
= btrfs_header_generation(leaf
);
2037 for (i
= 0; i
< nritems
; i
++) {
2040 btrfs_item_key_to_cpu(leaf
, &key
, i
);
2041 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
2043 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
2044 if (btrfs_file_extent_type(leaf
, fi
) ==
2045 BTRFS_FILE_EXTENT_INLINE
)
2048 * FIXME make sure to insert a trans record that
2049 * repeats the snapshot del on crash
2051 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
2052 if (disk_bytenr
== 0)
2054 ret
= btrfs_free_extent(trans
, root
, disk_bytenr
,
2055 btrfs_file_extent_disk_num_bytes(leaf
, fi
),
2056 leaf
->start
, leaf_owner
, leaf_generation
,
2063 static void noinline
reada_walk_down(struct btrfs_root
*root
,
2064 struct extent_buffer
*node
,
2077 nritems
= btrfs_header_nritems(node
);
2078 level
= btrfs_header_level(node
);
2082 for (i
= slot
; i
< nritems
&& skipped
< 32; i
++) {
2083 bytenr
= btrfs_node_blockptr(node
, i
);
2084 if (last
&& ((bytenr
> last
&& bytenr
- last
> 32 * 1024) ||
2085 (last
> bytenr
&& last
- bytenr
> 32 * 1024))) {
2089 blocksize
= btrfs_level_size(root
, level
- 1);
2091 ret
= lookup_extent_ref(NULL
, root
, bytenr
,
2099 mutex_unlock(&root
->fs_info
->fs_mutex
);
2100 ret
= readahead_tree_block(root
, bytenr
, blocksize
,
2101 btrfs_node_ptr_generation(node
, i
));
2102 last
= bytenr
+ blocksize
;
2104 mutex_lock(&root
->fs_info
->fs_mutex
);
2111 * helper function for drop_snapshot, this walks down the tree dropping ref
2112 * counts as it goes.
2114 static int noinline
walk_down_tree(struct btrfs_trans_handle
*trans
,
2115 struct btrfs_root
*root
,
2116 struct btrfs_path
*path
, int *level
)
2122 struct extent_buffer
*next
;
2123 struct extent_buffer
*cur
;
2124 struct extent_buffer
*parent
;
2129 WARN_ON(*level
< 0);
2130 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
2131 ret
= lookup_extent_ref(trans
, root
,
2132 path
->nodes
[*level
]->start
,
2133 path
->nodes
[*level
]->len
, &refs
);
2139 * walk down to the last node level and free all the leaves
2141 while(*level
>= 0) {
2142 WARN_ON(*level
< 0);
2143 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
2144 cur
= path
->nodes
[*level
];
2146 if (btrfs_header_level(cur
) != *level
)
2149 if (path
->slots
[*level
] >=
2150 btrfs_header_nritems(cur
))
2153 ret
= drop_leaf_ref(trans
, root
, cur
);
2157 bytenr
= btrfs_node_blockptr(cur
, path
->slots
[*level
]);
2158 ptr_gen
= btrfs_node_ptr_generation(cur
, path
->slots
[*level
]);
2159 blocksize
= btrfs_level_size(root
, *level
- 1);
2160 ret
= lookup_extent_ref(trans
, root
, bytenr
, blocksize
, &refs
);
2163 parent
= path
->nodes
[*level
];
2164 root_owner
= btrfs_header_owner(parent
);
2165 root_gen
= btrfs_header_generation(parent
);
2166 path
->slots
[*level
]++;
2167 ret
= btrfs_free_extent(trans
, root
, bytenr
, blocksize
,
2168 parent
->start
, root_owner
,
2169 root_gen
, *level
- 1, 1);
2173 next
= btrfs_find_tree_block(root
, bytenr
, blocksize
);
2174 if (!next
|| !btrfs_buffer_uptodate(next
, ptr_gen
)) {
2175 free_extent_buffer(next
);
2176 reada_walk_down(root
, cur
, path
->slots
[*level
]);
2177 mutex_unlock(&root
->fs_info
->fs_mutex
);
2178 next
= read_tree_block(root
, bytenr
, blocksize
,
2180 mutex_lock(&root
->fs_info
->fs_mutex
);
2182 WARN_ON(*level
<= 0);
2183 if (path
->nodes
[*level
-1])
2184 free_extent_buffer(path
->nodes
[*level
-1]);
2185 path
->nodes
[*level
-1] = next
;
2186 *level
= btrfs_header_level(next
);
2187 path
->slots
[*level
] = 0;
2190 WARN_ON(*level
< 0);
2191 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
2193 if (path
->nodes
[*level
] == root
->node
) {
2194 root_owner
= root
->root_key
.objectid
;
2195 parent
= path
->nodes
[*level
];
2197 parent
= path
->nodes
[*level
+ 1];
2198 root_owner
= btrfs_header_owner(parent
);
2201 root_gen
= btrfs_header_generation(parent
);
2202 ret
= btrfs_free_extent(trans
, root
, path
->nodes
[*level
]->start
,
2203 path
->nodes
[*level
]->len
, parent
->start
,
2204 root_owner
, root_gen
, *level
, 1);
2205 free_extent_buffer(path
->nodes
[*level
]);
2206 path
->nodes
[*level
] = NULL
;
2213 * helper for dropping snapshots. This walks back up the tree in the path
2214 * to find the first node higher up where we haven't yet gone through
2217 static int noinline
walk_up_tree(struct btrfs_trans_handle
*trans
,
2218 struct btrfs_root
*root
,
2219 struct btrfs_path
*path
, int *level
)
2223 struct btrfs_root_item
*root_item
= &root
->root_item
;
2228 for(i
= *level
; i
< BTRFS_MAX_LEVEL
- 1 && path
->nodes
[i
]; i
++) {
2229 slot
= path
->slots
[i
];
2230 if (slot
< btrfs_header_nritems(path
->nodes
[i
]) - 1) {
2231 struct extent_buffer
*node
;
2232 struct btrfs_disk_key disk_key
;
2233 node
= path
->nodes
[i
];
2236 WARN_ON(*level
== 0);
2237 btrfs_node_key(node
, &disk_key
, path
->slots
[i
]);
2238 memcpy(&root_item
->drop_progress
,
2239 &disk_key
, sizeof(disk_key
));
2240 root_item
->drop_level
= i
;
2243 struct extent_buffer
*parent
;
2244 if (path
->nodes
[*level
] == root
->node
)
2245 parent
= path
->nodes
[*level
];
2247 parent
= path
->nodes
[*level
+ 1];
2249 root_owner
= btrfs_header_owner(parent
);
2250 root_gen
= btrfs_header_generation(parent
);
2251 ret
= btrfs_free_extent(trans
, root
,
2252 path
->nodes
[*level
]->start
,
2253 path
->nodes
[*level
]->len
,
2254 parent
->start
, root_owner
,
2255 root_gen
, *level
, 1);
2257 free_extent_buffer(path
->nodes
[*level
]);
2258 path
->nodes
[*level
] = NULL
;
2266 * drop the reference count on the tree rooted at 'snap'. This traverses
2267 * the tree freeing any blocks that have a ref count of zero after being
2270 int btrfs_drop_snapshot(struct btrfs_trans_handle
*trans
, struct btrfs_root
2276 struct btrfs_path
*path
;
2279 struct btrfs_root_item
*root_item
= &root
->root_item
;
2281 path
= btrfs_alloc_path();
2284 level
= btrfs_header_level(root
->node
);
2286 if (btrfs_disk_key_objectid(&root_item
->drop_progress
) == 0) {
2287 path
->nodes
[level
] = root
->node
;
2288 extent_buffer_get(root
->node
);
2289 path
->slots
[level
] = 0;
2291 struct btrfs_key key
;
2292 struct btrfs_disk_key found_key
;
2293 struct extent_buffer
*node
;
2295 btrfs_disk_key_to_cpu(&key
, &root_item
->drop_progress
);
2296 level
= root_item
->drop_level
;
2297 path
->lowest_level
= level
;
2298 wret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2303 node
= path
->nodes
[level
];
2304 btrfs_node_key(node
, &found_key
, path
->slots
[level
]);
2305 WARN_ON(memcmp(&found_key
, &root_item
->drop_progress
,
2306 sizeof(found_key
)));
2309 wret
= walk_down_tree(trans
, root
, path
, &level
);
2315 wret
= walk_up_tree(trans
, root
, path
, &level
);
2325 for (i
= 0; i
<= orig_level
; i
++) {
2326 if (path
->nodes
[i
]) {
2327 free_extent_buffer(path
->nodes
[i
]);
2328 path
->nodes
[i
] = NULL
;
2332 btrfs_free_path(path
);
2336 int btrfs_free_block_groups(struct btrfs_fs_info
*info
)
2343 ret
= find_first_extent_bit(&info
->block_group_cache
, 0,
2344 &start
, &end
, (unsigned int)-1);
2347 ret
= get_state_private(&info
->block_group_cache
, start
, &ptr
);
2349 kfree((void *)(unsigned long)ptr
);
2350 clear_extent_bits(&info
->block_group_cache
, start
,
2351 end
, (unsigned int)-1, GFP_NOFS
);
2354 ret
= find_first_extent_bit(&info
->free_space_cache
, 0,
2355 &start
, &end
, EXTENT_DIRTY
);
2358 clear_extent_dirty(&info
->free_space_cache
, start
,
2364 int find_first_block_group(struct btrfs_root
*root
, struct btrfs_path
*path
,
2365 struct btrfs_key
*key
)
2368 struct btrfs_key found_key
;
2369 struct extent_buffer
*leaf
;
2372 ret
= btrfs_search_slot(NULL
, root
, key
, path
, 0, 0);
2376 slot
= path
->slots
[0];
2377 leaf
= path
->nodes
[0];
2378 if (slot
>= btrfs_header_nritems(leaf
)) {
2379 ret
= btrfs_next_leaf(root
, path
);
2386 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
2388 if (found_key
.objectid
>= key
->objectid
&&
2389 found_key
.type
== BTRFS_BLOCK_GROUP_ITEM_KEY
)
2398 int btrfs_read_block_groups(struct btrfs_root
*root
)
2400 struct btrfs_path
*path
;
2403 struct btrfs_block_group_cache
*cache
;
2404 struct btrfs_fs_info
*info
= root
->fs_info
;
2405 struct btrfs_space_info
*space_info
;
2406 struct extent_io_tree
*block_group_cache
;
2407 struct btrfs_key key
;
2408 struct btrfs_key found_key
;
2409 struct extent_buffer
*leaf
;
2411 block_group_cache
= &info
->block_group_cache
;
2413 root
= info
->extent_root
;
2416 btrfs_set_key_type(&key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
2417 path
= btrfs_alloc_path();
2422 ret
= find_first_block_group(root
, path
, &key
);
2430 leaf
= path
->nodes
[0];
2431 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
2432 cache
= kzalloc(sizeof(*cache
), GFP_NOFS
);
2438 read_extent_buffer(leaf
, &cache
->item
,
2439 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
2440 sizeof(cache
->item
));
2441 memcpy(&cache
->key
, &found_key
, sizeof(found_key
));
2444 key
.objectid
= found_key
.objectid
+ found_key
.offset
;
2445 btrfs_release_path(root
, path
);
2446 cache
->flags
= btrfs_block_group_flags(&cache
->item
);
2448 if (cache
->flags
& BTRFS_BLOCK_GROUP_DATA
) {
2449 bit
= BLOCK_GROUP_DATA
;
2450 } else if (cache
->flags
& BTRFS_BLOCK_GROUP_SYSTEM
) {
2451 bit
= BLOCK_GROUP_SYSTEM
;
2452 } else if (cache
->flags
& BTRFS_BLOCK_GROUP_METADATA
) {
2453 bit
= BLOCK_GROUP_METADATA
;
2455 set_avail_alloc_bits(info
, cache
->flags
);
2456 if (btrfs_chunk_readonly(root
, cache
->key
.objectid
))
2459 ret
= update_space_info(info
, cache
->flags
, found_key
.offset
,
2460 btrfs_block_group_used(&cache
->item
),
2463 cache
->space_info
= space_info
;
2465 /* use EXTENT_LOCKED to prevent merging */
2466 set_extent_bits(block_group_cache
, found_key
.objectid
,
2467 found_key
.objectid
+ found_key
.offset
- 1,
2468 bit
| EXTENT_LOCKED
, GFP_NOFS
);
2469 set_state_private(block_group_cache
, found_key
.objectid
,
2470 (unsigned long)cache
);
2474 btrfs_free_path(path
);
2478 int btrfs_make_block_group(struct btrfs_trans_handle
*trans
,
2479 struct btrfs_root
*root
, u64 bytes_used
,
2480 u64 type
, u64 chunk_objectid
, u64 chunk_offset
,
2485 struct btrfs_root
*extent_root
;
2486 struct btrfs_block_group_cache
*cache
;
2487 struct extent_io_tree
*block_group_cache
;
2489 extent_root
= root
->fs_info
->extent_root
;
2490 block_group_cache
= &root
->fs_info
->block_group_cache
;
2492 cache
= kzalloc(sizeof(*cache
), GFP_NOFS
);
2494 cache
->key
.objectid
= chunk_offset
;
2495 cache
->key
.offset
= size
;
2497 btrfs_set_key_type(&cache
->key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
2498 btrfs_set_block_group_used(&cache
->item
, bytes_used
);
2499 btrfs_set_block_group_chunk_objectid(&cache
->item
, chunk_objectid
);
2500 cache
->flags
= type
;
2501 btrfs_set_block_group_flags(&cache
->item
, type
);
2503 ret
= update_space_info(root
->fs_info
, cache
->flags
, size
, bytes_used
,
2504 &cache
->space_info
);
2507 bit
= block_group_state_bits(type
);
2508 set_extent_bits(block_group_cache
, chunk_offset
,
2509 chunk_offset
+ size
- 1,
2510 bit
| EXTENT_LOCKED
, GFP_NOFS
);
2512 set_state_private(block_group_cache
, chunk_offset
,
2513 (unsigned long)cache
);
2514 ret
= btrfs_insert_item(trans
, extent_root
, &cache
->key
, &cache
->item
,
2515 sizeof(cache
->item
));
2518 finish_current_insert(trans
, extent_root
);
2519 ret
= del_pending_extents(trans
, extent_root
);
2521 set_avail_alloc_bits(extent_root
->fs_info
, type
);
2526 * This is for converter use only.
2528 * In that case, we don't know where are free blocks located.
2529 * Therefore all block group cache entries must be setup properly
2530 * before doing any block allocation.
2532 int btrfs_make_block_groups(struct btrfs_trans_handle
*trans
,
2533 struct btrfs_root
*root
)
2541 u64 total_metadata
= 0;
2545 struct btrfs_root
*extent_root
;
2546 struct btrfs_block_group_cache
*cache
;
2547 struct extent_io_tree
*block_group_cache
;
2549 extent_root
= root
->fs_info
->extent_root
;
2550 block_group_cache
= &root
->fs_info
->block_group_cache
;
2551 chunk_objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
2552 total_bytes
= btrfs_super_total_bytes(&root
->fs_info
->super_copy
);
2553 group_align
= 64 * root
->sectorsize
;
2556 while (cur_start
< total_bytes
) {
2557 group_size
= total_bytes
/ 12;
2558 group_size
= min_t(u64
, group_size
, total_bytes
- cur_start
);
2559 if (cur_start
== 0) {
2560 bit
= BLOCK_GROUP_SYSTEM
;
2561 group_type
= BTRFS_BLOCK_GROUP_SYSTEM
;
2563 group_size
&= ~(group_align
- 1);
2564 group_size
= max_t(u64
, group_size
, 32 * 1024 * 1024);
2565 group_size
= min_t(u64
, group_size
, 128 * 1024 * 1024);
2567 group_size
&= ~(group_align
- 1);
2568 if (total_data
>= total_metadata
* 2) {
2569 group_type
= BTRFS_BLOCK_GROUP_METADATA
;
2570 group_size
= min_t(u64
, group_size
,
2571 1ULL * 1024 * 1024 * 1024);
2572 total_metadata
+= group_size
;
2574 group_type
= BTRFS_BLOCK_GROUP_DATA
;
2575 group_size
= min_t(u64
, group_size
,
2576 5ULL * 1024 * 1024 * 1024);
2577 total_data
+= group_size
;
2579 if ((total_bytes
- cur_start
) * 4 < group_size
* 5)
2580 group_size
= total_bytes
- cur_start
;
2583 cache
= kzalloc(sizeof(*cache
), GFP_NOFS
);
2586 cache
->key
.objectid
= cur_start
;
2587 cache
->key
.offset
= group_size
;
2588 btrfs_set_key_type(&cache
->key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
2590 btrfs_set_block_group_used(&cache
->item
, 0);
2591 btrfs_set_block_group_chunk_objectid(&cache
->item
,
2593 btrfs_set_block_group_flags(&cache
->item
, group_type
);
2595 cache
->flags
= group_type
;
2597 ret
= update_space_info(root
->fs_info
, group_type
, group_size
,
2598 0, &cache
->space_info
);
2600 set_avail_alloc_bits(extent_root
->fs_info
, group_type
);
2602 set_extent_bits(block_group_cache
, cur_start
,
2603 cur_start
+ group_size
- 1,
2604 bit
| EXTENT_LOCKED
, GFP_NOFS
);
2605 set_state_private(block_group_cache
, cur_start
,
2606 (unsigned long)cache
);
2607 cur_start
+= group_size
;
2609 /* then insert all the items */
2611 while(cur_start
< total_bytes
) {
2612 cache
= btrfs_lookup_block_group(root
->fs_info
, cur_start
);
2615 ret
= btrfs_insert_item(trans
, extent_root
, &cache
->key
, &cache
->item
,
2616 sizeof(cache
->item
));
2619 finish_current_insert(trans
, extent_root
);
2620 ret
= del_pending_extents(trans
, extent_root
);
2623 cur_start
= cache
->key
.objectid
+ cache
->key
.offset
;
2628 int btrfs_update_block_group(struct btrfs_trans_handle
*trans
,
2629 struct btrfs_root
*root
,
2630 u64 bytenr
, u64 num_bytes
, int alloc
,
2633 return update_block_group(trans
, root
, bytenr
, num_bytes
,