2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
21 #include "kerncompat.h"
22 #include "radix-tree.h"
25 #include "print-tree.h"
26 #include "transaction.h"
30 #define BLOCK_GROUP_DATA EXTENT_WRITEBACK
31 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
32 #define BLOCK_GROUP_SYSTEM EXTENT_NEW
34 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
36 #define PENDING_EXTENT_INSERT 0
37 #define PENDING_EXTENT_DELETE 1
38 #define PENDING_BACKREF_UPDATE 2
40 struct pending_extent_op
{
45 struct btrfs_disk_key key
;
49 static int alloc_reserved_tree_block(struct btrfs_trans_handle
*trans
,
50 struct btrfs_root
*root
,
51 u64 root_objectid
, u64 generation
,
52 u64 flags
, struct btrfs_disk_key
*key
,
53 int level
, struct btrfs_key
*ins
);
54 static int __free_extent(struct btrfs_trans_handle
*trans
,
55 struct btrfs_root
*root
,
56 u64 bytenr
, u64 num_bytes
, u64 parent
,
57 u64 root_objectid
, u64 owner_objectid
,
58 u64 owner_offset
, int refs_to_drop
);
59 static int finish_current_insert(struct btrfs_trans_handle
*trans
, struct
60 btrfs_root
*extent_root
);
61 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
62 btrfs_root
*extent_root
);
64 static int remove_sb_from_cache(struct btrfs_root
*root
,
65 struct btrfs_block_group_cache
*cache
)
71 struct extent_io_tree
*free_space_cache
;
73 free_space_cache
= &root
->fs_info
->free_space_cache
;
74 for (i
= 0; i
< BTRFS_SUPER_MIRROR_MAX
; i
++) {
75 bytenr
= btrfs_sb_offset(i
);
76 ret
= btrfs_rmap_block(&root
->fs_info
->mapping_tree
,
77 cache
->key
.objectid
, bytenr
, 0,
78 &logical
, &nr
, &stripe_len
);
81 clear_extent_dirty(free_space_cache
, logical
[nr
],
82 logical
[nr
] + stripe_len
- 1, GFP_NOFS
);
89 static int cache_block_group(struct btrfs_root
*root
,
90 struct btrfs_block_group_cache
*block_group
)
92 struct btrfs_path
*path
;
95 struct extent_buffer
*leaf
;
96 struct extent_io_tree
*free_space_cache
;
104 root
= root
->fs_info
->extent_root
;
105 free_space_cache
= &root
->fs_info
->free_space_cache
;
107 if (block_group
->cached
)
110 path
= btrfs_alloc_path();
115 last
= max_t(u64
, block_group
->key
.objectid
, BTRFS_SUPER_INFO_OFFSET
);
118 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
119 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
124 leaf
= path
->nodes
[0];
125 slot
= path
->slots
[0];
126 if (slot
>= btrfs_header_nritems(leaf
)) {
127 ret
= btrfs_next_leaf(root
, path
);
136 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
137 if (key
.objectid
< block_group
->key
.objectid
) {
140 if (key
.objectid
>= block_group
->key
.objectid
+
141 block_group
->key
.offset
) {
145 if (btrfs_key_type(&key
) == BTRFS_EXTENT_ITEM_KEY
) {
146 if (key
.objectid
> last
) {
147 hole_size
= key
.objectid
- last
;
148 set_extent_dirty(free_space_cache
, last
,
149 last
+ hole_size
- 1,
152 last
= key
.objectid
+ key
.offset
;
158 if (block_group
->key
.objectid
+
159 block_group
->key
.offset
> last
) {
160 hole_size
= block_group
->key
.objectid
+
161 block_group
->key
.offset
- last
;
162 set_extent_dirty(free_space_cache
, last
,
163 last
+ hole_size
- 1, GFP_NOFS
);
165 remove_sb_from_cache(root
, block_group
);
166 block_group
->cached
= 1;
168 btrfs_free_path(path
);
172 struct btrfs_block_group_cache
*btrfs_lookup_first_block_group(struct
176 struct extent_io_tree
*block_group_cache
;
177 struct btrfs_block_group_cache
*block_group
= NULL
;
183 bytenr
= max_t(u64
, bytenr
,
184 BTRFS_SUPER_INFO_OFFSET
+ BTRFS_SUPER_INFO_SIZE
);
185 block_group_cache
= &info
->block_group_cache
;
186 ret
= find_first_extent_bit(block_group_cache
,
187 bytenr
, &start
, &end
,
188 BLOCK_GROUP_DATA
| BLOCK_GROUP_METADATA
|
193 ret
= get_state_private(block_group_cache
, start
, &ptr
);
197 block_group
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
201 struct btrfs_block_group_cache
*btrfs_lookup_block_group(struct
205 struct extent_io_tree
*block_group_cache
;
206 struct btrfs_block_group_cache
*block_group
= NULL
;
212 block_group_cache
= &info
->block_group_cache
;
213 ret
= find_first_extent_bit(block_group_cache
,
214 bytenr
, &start
, &end
,
215 BLOCK_GROUP_DATA
| BLOCK_GROUP_METADATA
|
220 ret
= get_state_private(block_group_cache
, start
, &ptr
);
224 block_group
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
225 if (block_group
->key
.objectid
<= bytenr
&& bytenr
<
226 block_group
->key
.objectid
+ block_group
->key
.offset
)
231 static int block_group_bits(struct btrfs_block_group_cache
*cache
, u64 bits
)
233 return (cache
->flags
& bits
) == bits
;
236 static int noinline
find_search_start(struct btrfs_root
*root
,
237 struct btrfs_block_group_cache
**cache_ret
,
238 u64
*start_ret
, int num
, int data
)
241 struct btrfs_block_group_cache
*cache
= *cache_ret
;
245 u64 search_start
= *start_ret
;
252 ret
= cache_block_group(root
, cache
);
256 last
= max(search_start
, cache
->key
.objectid
);
257 if (cache
->ro
|| !block_group_bits(cache
, data
)) {
262 ret
= find_first_extent_bit(&root
->fs_info
->free_space_cache
,
263 last
, &start
, &end
, EXTENT_DIRTY
);
268 start
= max(last
, start
);
270 if (last
- start
< num
) {
273 if (start
+ num
> cache
->key
.objectid
+ cache
->key
.offset
) {
280 cache
= btrfs_lookup_block_group(root
->fs_info
, search_start
);
282 printk("Unable to find block group for %llu\n",
283 (unsigned long long)search_start
);
289 last
= cache
->key
.objectid
+ cache
->key
.offset
;
291 cache
= btrfs_lookup_first_block_group(root
->fs_info
, last
);
301 cache
= btrfs_find_block_group(root
, cache
, last
, data
, 0);
302 cache
= btrfs_find_block_group(root
, cache
, last
, data
, 0);
310 static u64
div_factor(u64 num
, int factor
)
319 static int block_group_state_bits(u64 flags
)
322 if (flags
& BTRFS_BLOCK_GROUP_DATA
)
323 bits
|= BLOCK_GROUP_DATA
;
324 if (flags
& BTRFS_BLOCK_GROUP_METADATA
)
325 bits
|= BLOCK_GROUP_METADATA
;
326 if (flags
& BTRFS_BLOCK_GROUP_SYSTEM
)
327 bits
|= BLOCK_GROUP_SYSTEM
;
331 struct btrfs_block_group_cache
*btrfs_find_block_group(struct btrfs_root
*root
,
332 struct btrfs_block_group_cache
333 *hint
, u64 search_start
,
336 struct btrfs_block_group_cache
*cache
;
337 struct extent_io_tree
*block_group_cache
;
338 struct btrfs_block_group_cache
*found_group
= NULL
;
339 struct btrfs_fs_info
*info
= root
->fs_info
;
352 block_group_cache
= &info
->block_group_cache
;
357 bit
= block_group_state_bits(data
);
360 struct btrfs_block_group_cache
*shint
;
361 shint
= btrfs_lookup_block_group(info
, search_start
);
362 if (shint
&& !shint
->ro
&& block_group_bits(shint
, data
)) {
363 used
= btrfs_block_group_used(&shint
->item
);
364 if (used
+ shint
->pinned
<
365 div_factor(shint
->key
.offset
, factor
)) {
370 if (hint
&& !hint
->ro
&& block_group_bits(hint
, data
)) {
371 used
= btrfs_block_group_used(&hint
->item
);
372 if (used
+ hint
->pinned
<
373 div_factor(hint
->key
.offset
, factor
)) {
376 last
= hint
->key
.objectid
+ hint
->key
.offset
;
380 hint_last
= max(hint
->key
.objectid
, search_start
);
382 hint_last
= search_start
;
388 ret
= find_first_extent_bit(block_group_cache
, last
,
393 ret
= get_state_private(block_group_cache
, start
, &ptr
);
397 cache
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
398 last
= cache
->key
.objectid
+ cache
->key
.offset
;
399 used
= btrfs_block_group_used(&cache
->item
);
401 if (!cache
->ro
&& block_group_bits(cache
, data
)) {
403 free_check
= cache
->key
.offset
;
405 free_check
= div_factor(cache
->key
.offset
,
408 if (used
+ cache
->pinned
< free_check
) {
425 * Back reference rules. Back refs have three main goals:
427 * 1) differentiate between all holders of references to an extent so that
428 * when a reference is dropped we can make sure it was a valid reference
429 * before freeing the extent.
431 * 2) Provide enough information to quickly find the holders of an extent
432 * if we notice a given block is corrupted or bad.
434 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
435 * maintenance. This is actually the same as #2, but with a slightly
436 * different use case.
438 * There are two kinds of back refs. The implicit back refs is optimized
439 * for pointers in non-shared tree blocks. For a given pointer in a block,
440 * back refs of this kind provide information about the block's owner tree
441 * and the pointer's key. This information allows us to find the block by
442 * b-tree searching. The full back refs is for pointers in tree blocks not
443 * referenced by their owner trees. The location of tree block is recorded
444 * in the back refs. Actually the full back refs is generic, and can be
445 * used in all cases the implicit back refs is used. The major shortcoming
446 * of the full back refs is its overhead. Every time a tree block gets
447 * COWed, we have to update back refs entry for all pointers in it.
449 * For a newly allocated tree block, we use implicit back refs for
450 * pointers in it. This means most tree related operations only involve
451 * implicit back refs. For a tree block created in old transaction, the
452 * only way to drop a reference to it is COW it. So we can detect the
453 * event that tree block loses its owner tree's reference and do the
454 * back refs conversion.
456 * When a tree block is COW'd through a tree, there are four cases:
458 * The reference count of the block is one and the tree is the block's
459 * owner tree. Nothing to do in this case.
461 * The reference count of the block is one and the tree is not the
462 * block's owner tree. In this case, full back refs is used for pointers
463 * in the block. Remove these full back refs, add implicit back refs for
464 * every pointer in the new block.
466 * The reference count of the block is greater than one and the tree is
467 * the block's owner tree. In this case, implicit back refs is used for
468 * pointers in the block. Add full back refs for every pointer in the
469 * block, increase lower level extents' reference counts. The original
470 * implicit back refs are carried over to the new block.
472 * The reference count of the block is greater than one and the tree is
473 * not the block's owner tree. Add implicit back refs for every pointer in
474 * the new block, increase lower level extents' reference count.
476 * Back Reference Key composing:
478 * The key objectid corresponds to the first byte in the extent,
479 * The key type is used to differentiate between types of back refs.
480 * There are different meanings of the key offset for different types
483 * File extents can be referenced by:
485 * - multiple snapshots, subvolumes, or different generations in one subvol
486 * - different files inside a single subvolume
487 * - different offsets inside a file (bookend extents in file.c)
489 * The extent ref structure for the implicit back refs has fields for:
491 * - Objectid of the subvolume root
492 * - objectid of the file holding the reference
493 * - original offset in the file
494 * - how many bookend extents
496 * The key offset for the implicit back refs is hash of the first
499 * The extent ref structure for the full back refs has field for:
501 * - number of pointers in the tree leaf
503 * The key offset for the full back refs is the first byte of
506 * When a file extent is allocated, the implicit back refs is used.
507 * the fields are filled in:
509 * (root_key.objectid, inode objectid, offset in file, 1)
511 * When a file extent is removed during file truncation, we find the
512 * corresponding implicit back refs and check the following fields:
514 * (btrfs_header_owner(leaf), inode objectid, offset in file)
516 * Btree extents can be referenced by:
518 * - Different subvolumes
520 * Both the implicit back refs and the full back refs for tree blocks
521 * only consist of key. The key offset for the implicit back refs is
522 * objectid of block's owner tree. The key offset for the full back refs
523 * is the first byte of parent block.
525 * When implicit back refs is used, information about the lowest key and
526 * level of the tree block is required. This information is stored in
527 * tree block info structure.
530 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
531 static int convert_extent_item_v0(struct btrfs_trans_handle
*trans
,
532 struct btrfs_root
*root
,
533 struct btrfs_path
*path
,
534 u64 owner
, u32 extra_size
)
536 struct btrfs_extent_item
*item
;
537 struct btrfs_extent_item_v0
*ei0
;
538 struct btrfs_extent_ref_v0
*ref0
;
539 struct btrfs_tree_block_info
*bi
;
540 struct extent_buffer
*leaf
;
541 struct btrfs_key key
;
542 struct btrfs_key found_key
;
543 u32 new_size
= sizeof(*item
);
547 leaf
= path
->nodes
[0];
548 BUG_ON(btrfs_item_size_nr(leaf
, path
->slots
[0]) != sizeof(*ei0
));
550 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
551 ei0
= btrfs_item_ptr(leaf
, path
->slots
[0],
552 struct btrfs_extent_item_v0
);
553 refs
= btrfs_extent_refs_v0(leaf
, ei0
);
555 if (owner
== (u64
)-1) {
557 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
558 ret
= btrfs_next_leaf(root
, path
);
562 leaf
= path
->nodes
[0];
564 btrfs_item_key_to_cpu(leaf
, &found_key
,
566 BUG_ON(key
.objectid
!= found_key
.objectid
);
567 if (found_key
.type
!= BTRFS_EXTENT_REF_V0_KEY
) {
571 ref0
= btrfs_item_ptr(leaf
, path
->slots
[0],
572 struct btrfs_extent_ref_v0
);
573 owner
= btrfs_ref_objectid_v0(leaf
, ref0
);
577 btrfs_release_path(root
, path
);
579 if (owner
< BTRFS_FIRST_FREE_OBJECTID
)
580 new_size
+= sizeof(*bi
);
582 new_size
-= sizeof(*ei0
);
583 ret
= btrfs_search_slot(trans
, root
, &key
, path
, new_size
, 1);
588 ret
= btrfs_extend_item(trans
, root
, path
, new_size
);
591 leaf
= path
->nodes
[0];
592 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_item
);
593 btrfs_set_extent_refs(leaf
, item
, refs
);
594 /* FIXME: get real generation */
595 btrfs_set_extent_generation(leaf
, item
, 0);
596 if (owner
< BTRFS_FIRST_FREE_OBJECTID
) {
597 btrfs_set_extent_flags(leaf
, item
,
598 BTRFS_EXTENT_FLAG_TREE_BLOCK
|
599 BTRFS_BLOCK_FLAG_FULL_BACKREF
);
600 bi
= (struct btrfs_tree_block_info
*)(item
+ 1);
601 /* FIXME: get first key of the block */
602 memset_extent_buffer(leaf
, 0, (unsigned long)bi
, sizeof(*bi
));
603 btrfs_set_tree_block_level(leaf
, bi
, (int)owner
);
605 btrfs_set_extent_flags(leaf
, item
, BTRFS_EXTENT_FLAG_DATA
);
607 btrfs_mark_buffer_dirty(leaf
);
612 static u64
hash_extent_data_ref(u64 root_objectid
, u64 owner
, u64 offset
)
614 u32 high_crc
= ~(u32
)0;
615 u32 low_crc
= ~(u32
)0;
618 lenum
= cpu_to_le64(root_objectid
);
619 high_crc
= btrfs_crc32c(high_crc
, &lenum
, sizeof(lenum
));
620 lenum
= cpu_to_le64(owner
);
621 low_crc
= btrfs_crc32c(low_crc
, &lenum
, sizeof(lenum
));
622 lenum
= cpu_to_le64(offset
);
623 low_crc
= btrfs_crc32c(low_crc
, &lenum
, sizeof(lenum
));
625 return ((u64
)high_crc
<< 31) ^ (u64
)low_crc
;
628 static u64
hash_extent_data_ref_item(struct extent_buffer
*leaf
,
629 struct btrfs_extent_data_ref
*ref
)
631 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf
, ref
),
632 btrfs_extent_data_ref_objectid(leaf
, ref
),
633 btrfs_extent_data_ref_offset(leaf
, ref
));
636 static int match_extent_data_ref(struct extent_buffer
*leaf
,
637 struct btrfs_extent_data_ref
*ref
,
638 u64 root_objectid
, u64 owner
, u64 offset
)
640 if (btrfs_extent_data_ref_root(leaf
, ref
) != root_objectid
||
641 btrfs_extent_data_ref_objectid(leaf
, ref
) != owner
||
642 btrfs_extent_data_ref_offset(leaf
, ref
) != offset
)
647 static noinline
int lookup_extent_data_ref(struct btrfs_trans_handle
*trans
,
648 struct btrfs_root
*root
,
649 struct btrfs_path
*path
,
650 u64 bytenr
, u64 parent
,
652 u64 owner
, u64 offset
)
654 struct btrfs_key key
;
655 struct btrfs_extent_data_ref
*ref
;
656 struct extent_buffer
*leaf
;
662 key
.objectid
= bytenr
;
664 key
.type
= BTRFS_SHARED_DATA_REF_KEY
;
667 key
.type
= BTRFS_EXTENT_DATA_REF_KEY
;
668 key
.offset
= hash_extent_data_ref(root_objectid
,
673 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
682 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
683 key
.type
= BTRFS_EXTENT_REF_V0_KEY
;
684 btrfs_release_path(root
, path
);
685 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
696 leaf
= path
->nodes
[0];
697 nritems
= btrfs_header_nritems(leaf
);
699 if (path
->slots
[0] >= nritems
) {
700 ret
= btrfs_next_leaf(root
, path
);
706 leaf
= path
->nodes
[0];
707 nritems
= btrfs_header_nritems(leaf
);
711 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
712 if (key
.objectid
!= bytenr
||
713 key
.type
!= BTRFS_EXTENT_DATA_REF_KEY
)
716 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
717 struct btrfs_extent_data_ref
);
719 if (match_extent_data_ref(leaf
, ref
, root_objectid
,
722 btrfs_release_path(root
, path
);
734 static noinline
int insert_extent_data_ref(struct btrfs_trans_handle
*trans
,
735 struct btrfs_root
*root
,
736 struct btrfs_path
*path
,
737 u64 bytenr
, u64 parent
,
738 u64 root_objectid
, u64 owner
,
739 u64 offset
, int refs_to_add
)
741 struct btrfs_key key
;
742 struct extent_buffer
*leaf
;
747 key
.objectid
= bytenr
;
749 key
.type
= BTRFS_SHARED_DATA_REF_KEY
;
751 size
= sizeof(struct btrfs_shared_data_ref
);
753 key
.type
= BTRFS_EXTENT_DATA_REF_KEY
;
754 key
.offset
= hash_extent_data_ref(root_objectid
,
756 size
= sizeof(struct btrfs_extent_data_ref
);
759 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
, size
);
760 if (ret
&& ret
!= -EEXIST
)
763 leaf
= path
->nodes
[0];
765 struct btrfs_shared_data_ref
*ref
;
766 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
767 struct btrfs_shared_data_ref
);
769 btrfs_set_shared_data_ref_count(leaf
, ref
, refs_to_add
);
771 num_refs
= btrfs_shared_data_ref_count(leaf
, ref
);
772 num_refs
+= refs_to_add
;
773 btrfs_set_shared_data_ref_count(leaf
, ref
, num_refs
);
776 struct btrfs_extent_data_ref
*ref
;
777 while (ret
== -EEXIST
) {
778 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
779 struct btrfs_extent_data_ref
);
780 if (match_extent_data_ref(leaf
, ref
, root_objectid
,
783 btrfs_release_path(root
, path
);
786 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
788 if (ret
&& ret
!= -EEXIST
)
791 leaf
= path
->nodes
[0];
793 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
794 struct btrfs_extent_data_ref
);
796 btrfs_set_extent_data_ref_root(leaf
, ref
,
798 btrfs_set_extent_data_ref_objectid(leaf
, ref
, owner
);
799 btrfs_set_extent_data_ref_offset(leaf
, ref
, offset
);
800 btrfs_set_extent_data_ref_count(leaf
, ref
, refs_to_add
);
802 num_refs
= btrfs_extent_data_ref_count(leaf
, ref
);
803 num_refs
+= refs_to_add
;
804 btrfs_set_extent_data_ref_count(leaf
, ref
, num_refs
);
807 btrfs_mark_buffer_dirty(leaf
);
810 btrfs_release_path(root
, path
);
814 static noinline
int remove_extent_data_ref(struct btrfs_trans_handle
*trans
,
815 struct btrfs_root
*root
,
816 struct btrfs_path
*path
,
819 struct btrfs_key key
;
820 struct btrfs_extent_data_ref
*ref1
= NULL
;
821 struct btrfs_shared_data_ref
*ref2
= NULL
;
822 struct extent_buffer
*leaf
;
826 leaf
= path
->nodes
[0];
827 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
829 if (key
.type
== BTRFS_EXTENT_DATA_REF_KEY
) {
830 ref1
= btrfs_item_ptr(leaf
, path
->slots
[0],
831 struct btrfs_extent_data_ref
);
832 num_refs
= btrfs_extent_data_ref_count(leaf
, ref1
);
833 } else if (key
.type
== BTRFS_SHARED_DATA_REF_KEY
) {
834 ref2
= btrfs_item_ptr(leaf
, path
->slots
[0],
835 struct btrfs_shared_data_ref
);
836 num_refs
= btrfs_shared_data_ref_count(leaf
, ref2
);
837 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
838 } else if (key
.type
== BTRFS_EXTENT_REF_V0_KEY
) {
839 struct btrfs_extent_ref_v0
*ref0
;
840 ref0
= btrfs_item_ptr(leaf
, path
->slots
[0],
841 struct btrfs_extent_ref_v0
);
842 num_refs
= btrfs_ref_count_v0(leaf
, ref0
);
848 BUG_ON(num_refs
< refs_to_drop
);
849 num_refs
-= refs_to_drop
;
852 ret
= btrfs_del_item(trans
, root
, path
);
854 if (key
.type
== BTRFS_EXTENT_DATA_REF_KEY
)
855 btrfs_set_extent_data_ref_count(leaf
, ref1
, num_refs
);
856 else if (key
.type
== BTRFS_SHARED_DATA_REF_KEY
)
857 btrfs_set_shared_data_ref_count(leaf
, ref2
, num_refs
);
858 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
860 struct btrfs_extent_ref_v0
*ref0
;
861 ref0
= btrfs_item_ptr(leaf
, path
->slots
[0],
862 struct btrfs_extent_ref_v0
);
863 btrfs_set_ref_count_v0(leaf
, ref0
, num_refs
);
866 btrfs_mark_buffer_dirty(leaf
);
871 static noinline u32
extent_data_ref_count(struct btrfs_root
*root
,
872 struct btrfs_path
*path
,
873 struct btrfs_extent_inline_ref
*iref
)
875 struct btrfs_key key
;
876 struct extent_buffer
*leaf
;
877 struct btrfs_extent_data_ref
*ref1
;
878 struct btrfs_shared_data_ref
*ref2
;
881 leaf
= path
->nodes
[0];
882 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
884 if (btrfs_extent_inline_ref_type(leaf
, iref
) ==
885 BTRFS_EXTENT_DATA_REF_KEY
) {
886 ref1
= (struct btrfs_extent_data_ref
*)(&iref
->offset
);
887 num_refs
= btrfs_extent_data_ref_count(leaf
, ref1
);
889 ref2
= (struct btrfs_shared_data_ref
*)(iref
+ 1);
890 num_refs
= btrfs_shared_data_ref_count(leaf
, ref2
);
892 } else if (key
.type
== BTRFS_EXTENT_DATA_REF_KEY
) {
893 ref1
= btrfs_item_ptr(leaf
, path
->slots
[0],
894 struct btrfs_extent_data_ref
);
895 num_refs
= btrfs_extent_data_ref_count(leaf
, ref1
);
896 } else if (key
.type
== BTRFS_SHARED_DATA_REF_KEY
) {
897 ref2
= btrfs_item_ptr(leaf
, path
->slots
[0],
898 struct btrfs_shared_data_ref
);
899 num_refs
= btrfs_shared_data_ref_count(leaf
, ref2
);
900 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
901 } else if (key
.type
== BTRFS_EXTENT_REF_V0_KEY
) {
902 struct btrfs_extent_ref_v0
*ref0
;
903 ref0
= btrfs_item_ptr(leaf
, path
->slots
[0],
904 struct btrfs_extent_ref_v0
);
905 num_refs
= btrfs_ref_count_v0(leaf
, ref0
);
913 static noinline
int lookup_tree_block_ref(struct btrfs_trans_handle
*trans
,
914 struct btrfs_root
*root
,
915 struct btrfs_path
*path
,
916 u64 bytenr
, u64 parent
,
919 struct btrfs_key key
;
922 key
.objectid
= bytenr
;
924 key
.type
= BTRFS_SHARED_BLOCK_REF_KEY
;
927 key
.type
= BTRFS_TREE_BLOCK_REF_KEY
;
928 key
.offset
= root_objectid
;
931 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
934 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
935 if (ret
== -ENOENT
&& parent
) {
936 btrfs_release_path(root
, path
);
937 key
.type
= BTRFS_EXTENT_REF_V0_KEY
;
938 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
946 static noinline
int insert_tree_block_ref(struct btrfs_trans_handle
*trans
,
947 struct btrfs_root
*root
,
948 struct btrfs_path
*path
,
949 u64 bytenr
, u64 parent
,
952 struct btrfs_key key
;
955 key
.objectid
= bytenr
;
957 key
.type
= BTRFS_SHARED_BLOCK_REF_KEY
;
960 key
.type
= BTRFS_TREE_BLOCK_REF_KEY
;
961 key
.offset
= root_objectid
;
964 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
, 0);
966 btrfs_release_path(root
, path
);
970 static inline int extent_ref_type(u64 parent
, u64 owner
)
972 if (owner
< BTRFS_FIRST_FREE_OBJECTID
) {
974 return BTRFS_SHARED_BLOCK_REF_KEY
;
976 return BTRFS_TREE_BLOCK_REF_KEY
;
979 return BTRFS_SHARED_DATA_REF_KEY
;
981 return BTRFS_EXTENT_DATA_REF_KEY
;
985 static int find_next_key(struct btrfs_path
*path
, struct btrfs_key
*key
)
989 for (level
= 0; level
< BTRFS_MAX_LEVEL
; level
++) {
990 if (!path
->nodes
[level
])
992 if (path
->slots
[level
] + 1 >=
993 btrfs_header_nritems(path
->nodes
[level
]))
996 btrfs_item_key_to_cpu(path
->nodes
[level
], key
,
997 path
->slots
[level
] + 1);
999 btrfs_node_key_to_cpu(path
->nodes
[level
], key
,
1000 path
->slots
[level
] + 1);
1006 static int lookup_inline_extent_backref(struct btrfs_trans_handle
*trans
,
1007 struct btrfs_root
*root
,
1008 struct btrfs_path
*path
,
1009 struct btrfs_extent_inline_ref
**ref_ret
,
1010 u64 bytenr
, u64 num_bytes
,
1011 u64 parent
, u64 root_objectid
,
1012 u64 owner
, u64 offset
, int insert
)
1014 struct btrfs_key key
;
1015 struct extent_buffer
*leaf
;
1016 struct btrfs_extent_item
*ei
;
1017 struct btrfs_extent_inline_ref
*iref
;
1028 key
.objectid
= bytenr
;
1029 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
1030 key
.offset
= num_bytes
;
1032 want
= extent_ref_type(parent
, owner
);
1034 extra_size
= btrfs_extent_inline_ref_size(want
);
1037 ret
= btrfs_search_slot(trans
, root
, &key
, path
, extra_size
, 1);
1044 leaf
= path
->nodes
[0];
1045 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
1046 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1047 if (item_size
< sizeof(*ei
)) {
1052 ret
= convert_extent_item_v0(trans
, root
, path
, owner
,
1058 leaf
= path
->nodes
[0];
1059 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
1062 BUG_ON(item_size
< sizeof(*ei
));
1064 if (owner
< BTRFS_FIRST_FREE_OBJECTID
&& insert
&&
1065 item_size
+ extra_size
>= BTRFS_MAX_EXTENT_ITEM_SIZE(root
)) {
1070 ei
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_item
);
1071 flags
= btrfs_extent_flags(leaf
, ei
);
1073 ptr
= (unsigned long)(ei
+ 1);
1074 end
= (unsigned long)ei
+ item_size
;
1076 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
1077 ptr
+= sizeof(struct btrfs_tree_block_info
);
1080 BUG_ON(!(flags
& BTRFS_EXTENT_FLAG_DATA
));
1089 iref
= (struct btrfs_extent_inline_ref
*)ptr
;
1090 type
= btrfs_extent_inline_ref_type(leaf
, iref
);
1094 ptr
+= btrfs_extent_inline_ref_size(type
);
1098 if (type
== BTRFS_EXTENT_DATA_REF_KEY
) {
1099 struct btrfs_extent_data_ref
*dref
;
1100 dref
= (struct btrfs_extent_data_ref
*)(&iref
->offset
);
1101 if (match_extent_data_ref(leaf
, dref
, root_objectid
,
1106 if (hash_extent_data_ref_item(leaf
, dref
) <
1107 hash_extent_data_ref(root_objectid
, owner
, offset
))
1111 ref_offset
= btrfs_extent_inline_ref_offset(leaf
, iref
);
1113 if (parent
== ref_offset
) {
1117 if (ref_offset
< parent
)
1120 if (root_objectid
== ref_offset
) {
1124 if (ref_offset
< root_objectid
)
1128 ptr
+= btrfs_extent_inline_ref_size(type
);
1130 if (err
== -ENOENT
&& insert
) {
1131 if (item_size
+ extra_size
>=
1132 BTRFS_MAX_EXTENT_ITEM_SIZE(root
)) {
1137 * To add new inline back ref, we have to make sure
1138 * there is no corresponding back ref item.
1139 * For simplicity, we just do not add new inline back
1140 * ref if there is any back ref item.
1142 if (owner
>= BTRFS_FIRST_FREE_OBJECTID
&&
1143 find_next_key(path
, &key
) == 0 && key
.objectid
== bytenr
) {
1148 *ref_ret
= (struct btrfs_extent_inline_ref
*)ptr
;
1153 static int setup_inline_extent_backref(struct btrfs_trans_handle
*trans
,
1154 struct btrfs_root
*root
,
1155 struct btrfs_path
*path
,
1156 struct btrfs_extent_inline_ref
*iref
,
1157 u64 parent
, u64 root_objectid
,
1158 u64 owner
, u64 offset
, int refs_to_add
)
1160 struct extent_buffer
*leaf
;
1161 struct btrfs_extent_item
*ei
;
1164 unsigned long item_offset
;
1170 leaf
= path
->nodes
[0];
1171 ei
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_item
);
1172 item_offset
= (unsigned long)iref
- (unsigned long)ei
;
1174 type
= extent_ref_type(parent
, owner
);
1175 size
= btrfs_extent_inline_ref_size(type
);
1177 ret
= btrfs_extend_item(trans
, root
, path
, size
);
1180 ei
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_item
);
1181 refs
= btrfs_extent_refs(leaf
, ei
);
1182 refs
+= refs_to_add
;
1183 btrfs_set_extent_refs(leaf
, ei
, refs
);
1185 ptr
= (unsigned long)ei
+ item_offset
;
1186 end
= (unsigned long)ei
+ btrfs_item_size_nr(leaf
, path
->slots
[0]);
1187 if (ptr
< end
- size
)
1188 memmove_extent_buffer(leaf
, ptr
+ size
, ptr
,
1191 iref
= (struct btrfs_extent_inline_ref
*)ptr
;
1192 btrfs_set_extent_inline_ref_type(leaf
, iref
, type
);
1193 if (type
== BTRFS_EXTENT_DATA_REF_KEY
) {
1194 struct btrfs_extent_data_ref
*dref
;
1195 dref
= (struct btrfs_extent_data_ref
*)(&iref
->offset
);
1196 btrfs_set_extent_data_ref_root(leaf
, dref
, root_objectid
);
1197 btrfs_set_extent_data_ref_objectid(leaf
, dref
, owner
);
1198 btrfs_set_extent_data_ref_offset(leaf
, dref
, offset
);
1199 btrfs_set_extent_data_ref_count(leaf
, dref
, refs_to_add
);
1200 } else if (type
== BTRFS_SHARED_DATA_REF_KEY
) {
1201 struct btrfs_shared_data_ref
*sref
;
1202 sref
= (struct btrfs_shared_data_ref
*)(iref
+ 1);
1203 btrfs_set_shared_data_ref_count(leaf
, sref
, refs_to_add
);
1204 btrfs_set_extent_inline_ref_offset(leaf
, iref
, parent
);
1205 } else if (type
== BTRFS_SHARED_BLOCK_REF_KEY
) {
1206 btrfs_set_extent_inline_ref_offset(leaf
, iref
, parent
);
1208 btrfs_set_extent_inline_ref_offset(leaf
, iref
, root_objectid
);
1210 btrfs_mark_buffer_dirty(leaf
);
1214 static int lookup_extent_backref(struct btrfs_trans_handle
*trans
,
1215 struct btrfs_root
*root
,
1216 struct btrfs_path
*path
,
1217 struct btrfs_extent_inline_ref
**ref_ret
,
1218 u64 bytenr
, u64 num_bytes
, u64 parent
,
1219 u64 root_objectid
, u64 owner
, u64 offset
)
1223 ret
= lookup_inline_extent_backref(trans
, root
, path
, ref_ret
,
1224 bytenr
, num_bytes
, parent
,
1225 root_objectid
, owner
, offset
, 0);
1229 btrfs_release_path(root
, path
);
1232 if (owner
< BTRFS_FIRST_FREE_OBJECTID
) {
1233 ret
= lookup_tree_block_ref(trans
, root
, path
, bytenr
, parent
,
1236 ret
= lookup_extent_data_ref(trans
, root
, path
, bytenr
, parent
,
1237 root_objectid
, owner
, offset
);
1242 static int update_inline_extent_backref(struct btrfs_trans_handle
*trans
,
1243 struct btrfs_root
*root
,
1244 struct btrfs_path
*path
,
1245 struct btrfs_extent_inline_ref
*iref
,
1248 struct extent_buffer
*leaf
;
1249 struct btrfs_extent_item
*ei
;
1250 struct btrfs_extent_data_ref
*dref
= NULL
;
1251 struct btrfs_shared_data_ref
*sref
= NULL
;
1260 leaf
= path
->nodes
[0];
1261 ei
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_item
);
1262 refs
= btrfs_extent_refs(leaf
, ei
);
1263 WARN_ON(refs_to_mod
< 0 && refs
+ refs_to_mod
<= 0);
1264 refs
+= refs_to_mod
;
1265 btrfs_set_extent_refs(leaf
, ei
, refs
);
1267 type
= btrfs_extent_inline_ref_type(leaf
, iref
);
1269 if (type
== BTRFS_EXTENT_DATA_REF_KEY
) {
1270 dref
= (struct btrfs_extent_data_ref
*)(&iref
->offset
);
1271 refs
= btrfs_extent_data_ref_count(leaf
, dref
);
1272 } else if (type
== BTRFS_SHARED_DATA_REF_KEY
) {
1273 sref
= (struct btrfs_shared_data_ref
*)(iref
+ 1);
1274 refs
= btrfs_shared_data_ref_count(leaf
, sref
);
1277 BUG_ON(refs_to_mod
!= -1);
1280 BUG_ON(refs_to_mod
< 0 && refs
< -refs_to_mod
);
1281 refs
+= refs_to_mod
;
1284 if (type
== BTRFS_EXTENT_DATA_REF_KEY
)
1285 btrfs_set_extent_data_ref_count(leaf
, dref
, refs
);
1287 btrfs_set_shared_data_ref_count(leaf
, sref
, refs
);
1289 size
= btrfs_extent_inline_ref_size(type
);
1290 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
1291 ptr
= (unsigned long)iref
;
1292 end
= (unsigned long)ei
+ item_size
;
1293 if (ptr
+ size
< end
)
1294 memmove_extent_buffer(leaf
, ptr
, ptr
+ size
,
1297 ret
= btrfs_truncate_item(trans
, root
, path
, item_size
, 1);
1300 btrfs_mark_buffer_dirty(leaf
);
1304 static int insert_inline_extent_backref(struct btrfs_trans_handle
*trans
,
1305 struct btrfs_root
*root
,
1306 struct btrfs_path
*path
,
1307 u64 bytenr
, u64 num_bytes
, u64 parent
,
1308 u64 root_objectid
, u64 owner
,
1309 u64 offset
, int refs_to_add
)
1311 struct btrfs_extent_inline_ref
*iref
;
1314 ret
= lookup_inline_extent_backref(trans
, root
, path
, &iref
,
1315 bytenr
, num_bytes
, parent
,
1316 root_objectid
, owner
, offset
, 1);
1318 BUG_ON(owner
< BTRFS_FIRST_FREE_OBJECTID
);
1319 ret
= update_inline_extent_backref(trans
, root
, path
, iref
,
1321 } else if (ret
== -ENOENT
) {
1322 ret
= setup_inline_extent_backref(trans
, root
, path
, iref
,
1323 parent
, root_objectid
,
1324 owner
, offset
, refs_to_add
);
1329 static int insert_extent_backref(struct btrfs_trans_handle
*trans
,
1330 struct btrfs_root
*root
,
1331 struct btrfs_path
*path
,
1332 u64 bytenr
, u64 parent
, u64 root_objectid
,
1333 u64 owner
, u64 offset
, int refs_to_add
)
1337 if (owner
>= BTRFS_FIRST_FREE_OBJECTID
) {
1338 ret
= insert_extent_data_ref(trans
, root
, path
, bytenr
,
1339 parent
, root_objectid
,
1340 owner
, offset
, refs_to_add
);
1342 BUG_ON(refs_to_add
!= 1);
1343 ret
= insert_tree_block_ref(trans
, root
, path
, bytenr
,
1344 parent
, root_objectid
);
1349 static int remove_extent_backref(struct btrfs_trans_handle
*trans
,
1350 struct btrfs_root
*root
,
1351 struct btrfs_path
*path
,
1352 struct btrfs_extent_inline_ref
*iref
,
1353 int refs_to_drop
, int is_data
)
1357 BUG_ON(!is_data
&& refs_to_drop
!= 1);
1359 ret
= update_inline_extent_backref(trans
, root
, path
, iref
,
1361 } else if (is_data
) {
1362 ret
= remove_extent_data_ref(trans
, root
, path
, refs_to_drop
);
1364 ret
= btrfs_del_item(trans
, root
, path
);
1369 int btrfs_inc_extent_ref(struct btrfs_trans_handle
*trans
,
1370 struct btrfs_root
*root
,
1371 u64 bytenr
, u64 num_bytes
, u64 parent
,
1372 u64 root_objectid
, u64 owner
, u64 offset
)
1374 struct btrfs_path
*path
;
1375 struct extent_buffer
*leaf
;
1376 struct btrfs_extent_item
*item
;
1381 path
= btrfs_alloc_path();
1386 path
->leave_spinning
= 1;
1388 ret
= insert_inline_extent_backref(trans
, root
->fs_info
->extent_root
,
1389 path
, bytenr
, num_bytes
, parent
,
1390 root_objectid
, owner
, offset
, 1);
1394 if (ret
!= -EAGAIN
) {
1399 leaf
= path
->nodes
[0];
1400 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_item
);
1401 refs
= btrfs_extent_refs(leaf
, item
);
1402 btrfs_set_extent_refs(leaf
, item
, refs
+ 1);
1404 btrfs_mark_buffer_dirty(leaf
);
1405 btrfs_release_path(root
->fs_info
->extent_root
, path
);
1408 path
->leave_spinning
= 1;
1410 /* now insert the actual backref */
1411 ret
= insert_extent_backref(trans
, root
->fs_info
->extent_root
,
1412 path
, bytenr
, parent
, root_objectid
,
1417 btrfs_free_path(path
);
1418 finish_current_insert(trans
, root
->fs_info
->extent_root
);
1419 del_pending_extents(trans
, root
->fs_info
->extent_root
);
1424 int btrfs_extent_post_op(struct btrfs_trans_handle
*trans
,
1425 struct btrfs_root
*root
)
1427 finish_current_insert(trans
, root
->fs_info
->extent_root
);
1428 del_pending_extents(trans
, root
->fs_info
->extent_root
);
1432 int btrfs_lookup_extent_info(struct btrfs_trans_handle
*trans
,
1433 struct btrfs_root
*root
, u64 bytenr
,
1434 u64 num_bytes
, u64
*refs
, u64
*flags
)
1436 struct btrfs_path
*path
;
1438 struct btrfs_key key
;
1439 struct extent_buffer
*l
;
1440 struct btrfs_extent_item
*item
;
1445 WARN_ON(num_bytes
< root
->sectorsize
);
1446 path
= btrfs_alloc_path();
1448 key
.objectid
= bytenr
;
1449 key
.offset
= num_bytes
;
1450 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
1451 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
1456 btrfs_print_leaf(root
, path
->nodes
[0]);
1457 printk("failed to find block number %Lu\n", bytenr
);
1462 item_size
= btrfs_item_size_nr(l
, path
->slots
[0]);
1463 if (item_size
>= sizeof(*item
)) {
1464 item
= btrfs_item_ptr(l
, path
->slots
[0],
1465 struct btrfs_extent_item
);
1466 num_refs
= btrfs_extent_refs(l
, item
);
1467 extent_flags
= btrfs_extent_flags(l
, item
);
1469 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1470 struct btrfs_extent_item_v0
*ei0
;
1471 BUG_ON(item_size
!= sizeof(*ei0
));
1472 ei0
= btrfs_item_ptr(l
, path
->slots
[0],
1473 struct btrfs_extent_item_v0
);
1474 num_refs
= btrfs_extent_refs_v0(l
, ei0
);
1475 /* FIXME: this isn't correct for data */
1476 extent_flags
= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
1481 BUG_ON(num_refs
== 0);
1482 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
1486 *flags
= extent_flags
;
1488 btrfs_free_path(path
);
1492 int btrfs_set_block_flags(struct btrfs_trans_handle
*trans
,
1493 struct btrfs_root
*root
,
1494 u64 bytenr
, u64 num_bytes
, u64 flags
)
1496 struct btrfs_path
*path
;
1498 struct btrfs_key key
;
1499 struct extent_buffer
*l
;
1500 struct btrfs_extent_item
*item
;
1503 WARN_ON(num_bytes
< root
->sectorsize
);
1504 path
= btrfs_alloc_path();
1506 key
.objectid
= bytenr
;
1507 key
.offset
= num_bytes
;
1508 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
1509 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
1514 btrfs_print_leaf(root
, path
->nodes
[0]);
1515 printk("failed to find block number %Lu\n",
1516 (unsigned long long)bytenr
);
1520 item_size
= btrfs_item_size_nr(l
, path
->slots
[0]);
1521 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1522 if (item_size
< sizeof(*item
)) {
1523 ret
= convert_extent_item_v0(trans
, root
->fs_info
->extent_root
,
1529 item_size
= btrfs_item_size_nr(l
, path
->slots
[0]);
1532 BUG_ON(item_size
< sizeof(*item
));
1533 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
1534 flags
|= btrfs_extent_flags(l
, item
);
1535 btrfs_set_extent_flags(l
, item
, flags
);
1537 btrfs_free_path(path
);
1538 finish_current_insert(trans
, root
->fs_info
->extent_root
);
1539 del_pending_extents(trans
, root
->fs_info
->extent_root
);
1543 static int __btrfs_mod_ref(struct btrfs_trans_handle
*trans
,
1544 struct btrfs_root
*root
,
1545 struct extent_buffer
*buf
,
1546 int record_parent
, int inc
)
1553 struct btrfs_key key
;
1554 struct btrfs_file_extent_item
*fi
;
1559 int (*process_func
)(struct btrfs_trans_handle
*trans
,
1560 struct btrfs_root
*root
,
1561 u64
, u64
, u64
, u64
, u64
, u64
);
1563 ref_root
= btrfs_header_owner(buf
);
1564 nritems
= btrfs_header_nritems(buf
);
1565 level
= btrfs_header_level(buf
);
1567 if (!root
->ref_cows
&& level
== 0)
1571 process_func
= btrfs_inc_extent_ref
;
1573 process_func
= btrfs_free_extent
;
1576 parent
= buf
->start
;
1580 for (i
= 0; i
< nritems
; i
++) {
1583 btrfs_item_key_to_cpu(buf
, &key
, i
);
1584 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
1586 fi
= btrfs_item_ptr(buf
, i
,
1587 struct btrfs_file_extent_item
);
1588 if (btrfs_file_extent_type(buf
, fi
) ==
1589 BTRFS_FILE_EXTENT_INLINE
)
1591 bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
1595 num_bytes
= btrfs_file_extent_disk_num_bytes(buf
, fi
);
1596 key
.offset
-= btrfs_file_extent_offset(buf
, fi
);
1597 ret
= process_func(trans
, root
, bytenr
, num_bytes
,
1598 parent
, ref_root
, key
.objectid
,
1606 bytenr
= btrfs_node_blockptr(buf
, i
);
1607 num_bytes
= btrfs_level_size(root
, level
- 1);
1608 ret
= process_func(trans
, root
, bytenr
, num_bytes
,
1609 parent
, ref_root
, level
- 1, 0);
1621 for (i
=0; i
< faili
; i
++) {
1624 btrfs_item_key_to_cpu(buf
, &key
, i
);
1625 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
1627 fi
= btrfs_item_ptr(buf
, i
,
1628 struct btrfs_file_extent_item
);
1629 if (btrfs_file_extent_type(buf
, fi
) ==
1630 BTRFS_FILE_EXTENT_INLINE
)
1632 disk_bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
1633 if (disk_bytenr
== 0)
1635 err
= btrfs_free_extent(trans
, root
, disk_bytenr
,
1636 btrfs_file_extent_disk_num_bytes(buf
,
1640 bytenr
= btrfs_node_blockptr(buf
, i
);
1641 err
= btrfs_free_extent(trans
, root
, bytenr
,
1642 btrfs_level_size(root
, level
- 1), 0);
1650 int btrfs_inc_ref(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
1651 struct extent_buffer
*buf
, int record_parent
)
1653 return __btrfs_mod_ref(trans
, root
, buf
, record_parent
, 1);
1656 int btrfs_dec_ref(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
1657 struct extent_buffer
*buf
, int record_parent
)
1659 return __btrfs_mod_ref(trans
, root
, buf
, record_parent
, 0);
1662 static int write_one_cache_group(struct btrfs_trans_handle
*trans
,
1663 struct btrfs_root
*root
,
1664 struct btrfs_path
*path
,
1665 struct btrfs_block_group_cache
*cache
)
1669 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
1671 struct extent_buffer
*leaf
;
1673 ret
= btrfs_search_slot(trans
, extent_root
, &cache
->key
, path
, 0, 1);
1678 leaf
= path
->nodes
[0];
1679 bi
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
1680 write_extent_buffer(leaf
, &cache
->item
, bi
, sizeof(cache
->item
));
1681 btrfs_mark_buffer_dirty(leaf
);
1682 btrfs_release_path(extent_root
, path
);
1684 finish_current_insert(trans
, extent_root
);
1685 pending_ret
= del_pending_extents(trans
, extent_root
);
1694 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle
*trans
,
1695 struct btrfs_root
*root
)
1697 struct extent_io_tree
*block_group_cache
;
1698 struct btrfs_block_group_cache
*cache
;
1702 struct btrfs_path
*path
;
1708 block_group_cache
= &root
->fs_info
->block_group_cache
;
1709 path
= btrfs_alloc_path();
1714 ret
= find_first_extent_bit(block_group_cache
, last
,
1715 &start
, &end
, BLOCK_GROUP_DIRTY
);
1720 ret
= get_state_private(block_group_cache
, start
, &ptr
);
1723 cache
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
1724 err
= write_one_cache_group(trans
, root
,
1727 * if we fail to write the cache group, we want
1728 * to keep it marked dirty in hopes that a later
1735 clear_extent_bits(block_group_cache
, start
, end
,
1736 BLOCK_GROUP_DIRTY
, GFP_NOFS
);
1738 btrfs_free_path(path
);
1742 static struct btrfs_space_info
*__find_space_info(struct btrfs_fs_info
*info
,
1745 struct list_head
*head
= &info
->space_info
;
1746 struct list_head
*cur
;
1747 struct btrfs_space_info
*found
;
1748 list_for_each(cur
, head
) {
1749 found
= list_entry(cur
, struct btrfs_space_info
, list
);
1750 if (found
->flags
== flags
)
1757 static int update_space_info(struct btrfs_fs_info
*info
, u64 flags
,
1758 u64 total_bytes
, u64 bytes_used
,
1759 struct btrfs_space_info
**space_info
)
1761 struct btrfs_space_info
*found
;
1763 found
= __find_space_info(info
, flags
);
1765 found
->total_bytes
+= total_bytes
;
1766 found
->bytes_used
+= bytes_used
;
1767 WARN_ON(found
->total_bytes
< found
->bytes_used
);
1768 *space_info
= found
;
1771 found
= kmalloc(sizeof(*found
), GFP_NOFS
);
1775 list_add(&found
->list
, &info
->space_info
);
1776 found
->flags
= flags
;
1777 found
->total_bytes
= total_bytes
;
1778 found
->bytes_used
= bytes_used
;
1779 found
->bytes_pinned
= 0;
1781 *space_info
= found
;
1786 static void set_avail_alloc_bits(struct btrfs_fs_info
*fs_info
, u64 flags
)
1788 u64 extra_flags
= flags
& (BTRFS_BLOCK_GROUP_RAID0
|
1789 BTRFS_BLOCK_GROUP_RAID1
|
1790 BTRFS_BLOCK_GROUP_DUP
);
1792 if (flags
& BTRFS_BLOCK_GROUP_DATA
)
1793 fs_info
->avail_data_alloc_bits
|= extra_flags
;
1794 if (flags
& BTRFS_BLOCK_GROUP_METADATA
)
1795 fs_info
->avail_metadata_alloc_bits
|= extra_flags
;
1796 if (flags
& BTRFS_BLOCK_GROUP_SYSTEM
)
1797 fs_info
->avail_system_alloc_bits
|= extra_flags
;
1801 static int do_chunk_alloc(struct btrfs_trans_handle
*trans
,
1802 struct btrfs_root
*extent_root
, u64 alloc_bytes
,
1805 struct btrfs_space_info
*space_info
;
1811 space_info
= __find_space_info(extent_root
->fs_info
, flags
);
1813 ret
= update_space_info(extent_root
->fs_info
, flags
,
1817 BUG_ON(!space_info
);
1819 if (space_info
->full
)
1822 thresh
= div_factor(space_info
->total_bytes
, 7);
1823 if ((space_info
->bytes_used
+ space_info
->bytes_pinned
+ alloc_bytes
) <
1827 ret
= btrfs_alloc_chunk(trans
, extent_root
, &start
, &num_bytes
, flags
);
1828 if (ret
== -ENOSPC
) {
1829 space_info
->full
= 1;
1835 ret
= btrfs_make_block_group(trans
, extent_root
, 0, flags
,
1836 BTRFS_FIRST_CHUNK_TREE_OBJECTID
, start
, num_bytes
);
1841 static int update_block_group(struct btrfs_trans_handle
*trans
,
1842 struct btrfs_root
*root
,
1843 u64 bytenr
, u64 num_bytes
, int alloc
,
1846 struct btrfs_block_group_cache
*cache
;
1847 struct btrfs_fs_info
*info
= root
->fs_info
;
1848 u64 total
= num_bytes
;
1854 /* block accounting for super block */
1855 old_val
= btrfs_super_bytes_used(&info
->super_copy
);
1857 old_val
+= num_bytes
;
1859 old_val
-= num_bytes
;
1860 btrfs_set_super_bytes_used(&info
->super_copy
, old_val
);
1862 /* block accounting for root item */
1863 old_val
= btrfs_root_used(&root
->root_item
);
1865 old_val
+= num_bytes
;
1867 old_val
-= num_bytes
;
1868 btrfs_set_root_used(&root
->root_item
, old_val
);
1871 cache
= btrfs_lookup_block_group(info
, bytenr
);
1875 byte_in_group
= bytenr
- cache
->key
.objectid
;
1876 WARN_ON(byte_in_group
> cache
->key
.offset
);
1877 start
= cache
->key
.objectid
;
1878 end
= start
+ cache
->key
.offset
- 1;
1879 set_extent_bits(&info
->block_group_cache
, start
, end
,
1880 BLOCK_GROUP_DIRTY
, GFP_NOFS
);
1882 old_val
= btrfs_block_group_used(&cache
->item
);
1883 num_bytes
= min(total
, cache
->key
.offset
- byte_in_group
);
1885 old_val
+= num_bytes
;
1886 cache
->space_info
->bytes_used
+= num_bytes
;
1888 old_val
-= num_bytes
;
1889 cache
->space_info
->bytes_used
-= num_bytes
;
1891 set_extent_dirty(&info
->free_space_cache
,
1892 bytenr
, bytenr
+ num_bytes
- 1,
1896 btrfs_set_block_group_used(&cache
->item
, old_val
);
1898 bytenr
+= num_bytes
;
1903 static int update_pinned_extents(struct btrfs_root
*root
,
1904 u64 bytenr
, u64 num
, int pin
)
1907 struct btrfs_block_group_cache
*cache
;
1908 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1911 set_extent_dirty(&fs_info
->pinned_extents
,
1912 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
1914 clear_extent_dirty(&fs_info
->pinned_extents
,
1915 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
1918 cache
= btrfs_lookup_block_group(fs_info
, bytenr
);
1920 len
= min(num
, cache
->key
.offset
-
1921 (bytenr
- cache
->key
.objectid
));
1923 cache
->pinned
+= len
;
1924 cache
->space_info
->bytes_pinned
+= len
;
1925 fs_info
->total_pinned
+= len
;
1927 cache
->pinned
-= len
;
1928 cache
->space_info
->bytes_pinned
-= len
;
1929 fs_info
->total_pinned
-= len
;
1937 int btrfs_copy_pinned(struct btrfs_root
*root
, struct extent_io_tree
*copy
)
1942 struct extent_io_tree
*pinned_extents
= &root
->fs_info
->pinned_extents
;
1946 ret
= find_first_extent_bit(pinned_extents
, last
,
1947 &start
, &end
, EXTENT_DIRTY
);
1950 set_extent_dirty(copy
, start
, end
, GFP_NOFS
);
1956 int btrfs_finish_extent_commit(struct btrfs_trans_handle
*trans
,
1957 struct btrfs_root
*root
,
1958 struct extent_io_tree
*unpin
)
1963 struct extent_io_tree
*free_space_cache
;
1964 free_space_cache
= &root
->fs_info
->free_space_cache
;
1967 ret
= find_first_extent_bit(unpin
, 0, &start
, &end
,
1971 update_pinned_extents(root
, start
, end
+ 1 - start
, 0);
1972 clear_extent_dirty(unpin
, start
, end
, GFP_NOFS
);
1973 set_extent_dirty(free_space_cache
, start
, end
, GFP_NOFS
);
1978 static int finish_current_insert(struct btrfs_trans_handle
*trans
,
1979 struct btrfs_root
*extent_root
)
1984 struct btrfs_fs_info
*info
= extent_root
->fs_info
;
1985 struct btrfs_path
*path
;
1986 struct pending_extent_op
*extent_op
;
1987 struct btrfs_key key
;
1990 path
= btrfs_alloc_path();
1993 ret
= find_first_extent_bit(&info
->extent_ins
, 0, &start
,
1994 &end
, EXTENT_LOCKED
);
1998 ret
= get_state_private(&info
->extent_ins
, start
, &priv
);
2000 extent_op
= (struct pending_extent_op
*)(unsigned long)priv
;
2002 if (extent_op
->type
== PENDING_EXTENT_INSERT
) {
2003 key
.objectid
= start
;
2004 key
.offset
= end
+ 1 - start
;
2005 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
2006 ret
= alloc_reserved_tree_block(trans
, extent_root
,
2007 extent_root
->root_key
.objectid
,
2011 extent_op
->level
, &key
);
2016 clear_extent_bits(&info
->extent_ins
, start
, end
, EXTENT_LOCKED
,
2020 btrfs_free_path(path
);
2024 static int pin_down_bytes(struct btrfs_trans_handle
*trans
,
2025 struct btrfs_root
*root
,
2026 u64 bytenr
, u64 num_bytes
, int is_data
)
2029 struct extent_buffer
*buf
;
2034 buf
= btrfs_find_tree_block(root
, bytenr
, num_bytes
);
2038 /* we can reuse a block if it hasn't been written
2039 * and it is from this transaction. We can't
2040 * reuse anything from the tree log root because
2041 * it has tiny sub-transactions.
2043 if (btrfs_buffer_uptodate(buf
, 0)) {
2044 u64 header_owner
= btrfs_header_owner(buf
);
2045 u64 header_transid
= btrfs_header_generation(buf
);
2046 if (header_owner
!= BTRFS_TREE_LOG_OBJECTID
&&
2047 header_transid
== trans
->transid
&&
2048 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
)) {
2049 clean_tree_block(NULL
, root
, buf
);
2050 free_extent_buffer(buf
);
2054 free_extent_buffer(buf
);
2056 update_pinned_extents(root
, bytenr
, num_bytes
, 1);
2063 * remove an extent from the root, returns 0 on success
2065 static int __free_extent(struct btrfs_trans_handle
*trans
,
2066 struct btrfs_root
*root
,
2067 u64 bytenr
, u64 num_bytes
, u64 parent
,
2068 u64 root_objectid
, u64 owner_objectid
,
2069 u64 owner_offset
, int refs_to_drop
)
2072 struct btrfs_key key
;
2073 struct btrfs_path
*path
;
2074 struct btrfs_extent_ops
*ops
= root
->fs_info
->extent_ops
;
2075 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
2076 struct extent_buffer
*leaf
;
2077 struct btrfs_extent_item
*ei
;
2078 struct btrfs_extent_inline_ref
*iref
;
2081 int extent_slot
= 0;
2082 int found_extent
= 0;
2087 path
= btrfs_alloc_path();
2092 path
->leave_spinning
= 1;
2094 is_data
= owner_objectid
>= BTRFS_FIRST_FREE_OBJECTID
;
2095 BUG_ON(!is_data
&& refs_to_drop
!= 1);
2097 ret
= lookup_extent_backref(trans
, extent_root
, path
, &iref
,
2098 bytenr
, num_bytes
, parent
,
2099 root_objectid
, owner_objectid
,
2102 extent_slot
= path
->slots
[0];
2103 while (extent_slot
>= 0) {
2104 btrfs_item_key_to_cpu(path
->nodes
[0], &key
,
2106 if (key
.objectid
!= bytenr
)
2108 if (key
.type
== BTRFS_EXTENT_ITEM_KEY
&&
2109 key
.offset
== num_bytes
) {
2113 if (path
->slots
[0] - extent_slot
> 5)
2117 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2118 item_size
= btrfs_item_size_nr(path
->nodes
[0], extent_slot
);
2119 if (found_extent
&& item_size
< sizeof(*ei
))
2122 if (!found_extent
) {
2124 ret
= remove_extent_backref(trans
, extent_root
, path
,
2128 btrfs_release_path(extent_root
, path
);
2129 path
->leave_spinning
= 1;
2131 key
.objectid
= bytenr
;
2132 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
2133 key
.offset
= num_bytes
;
2135 ret
= btrfs_search_slot(trans
, extent_root
,
2138 printk(KERN_ERR
"umm, got %d back from search"
2139 ", was looking for %llu\n", ret
,
2140 (unsigned long long)bytenr
);
2141 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
2144 extent_slot
= path
->slots
[0];
2147 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
2149 printk(KERN_ERR
"btrfs unable to find ref byte nr %llu "
2150 "parent %llu root %llu owner %llu offset %llu\n",
2151 (unsigned long long)bytenr
,
2152 (unsigned long long)parent
,
2153 (unsigned long long)root_objectid
,
2154 (unsigned long long)owner_objectid
,
2155 (unsigned long long)owner_offset
);
2158 leaf
= path
->nodes
[0];
2159 item_size
= btrfs_item_size_nr(leaf
, extent_slot
);
2160 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2161 if (item_size
< sizeof(*ei
)) {
2162 BUG_ON(found_extent
|| extent_slot
!= path
->slots
[0]);
2163 ret
= convert_extent_item_v0(trans
, extent_root
, path
,
2167 btrfs_release_path(extent_root
, path
);
2168 path
->leave_spinning
= 1;
2170 key
.objectid
= bytenr
;
2171 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
2172 key
.offset
= num_bytes
;
2174 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
,
2177 printk(KERN_ERR
"umm, got %d back from search"
2178 ", was looking for %llu\n", ret
,
2179 (unsigned long long)bytenr
);
2180 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
2183 extent_slot
= path
->slots
[0];
2184 leaf
= path
->nodes
[0];
2185 item_size
= btrfs_item_size_nr(leaf
, extent_slot
);
2188 BUG_ON(item_size
< sizeof(*ei
));
2189 ei
= btrfs_item_ptr(leaf
, extent_slot
,
2190 struct btrfs_extent_item
);
2191 if (owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
) {
2192 struct btrfs_tree_block_info
*bi
;
2193 BUG_ON(item_size
< sizeof(*ei
) + sizeof(*bi
));
2194 bi
= (struct btrfs_tree_block_info
*)(ei
+ 1);
2195 WARN_ON(owner_objectid
!= btrfs_tree_block_level(leaf
, bi
));
2198 refs
= btrfs_extent_refs(leaf
, ei
);
2199 BUG_ON(refs
< refs_to_drop
);
2200 refs
-= refs_to_drop
;
2204 * In the case of inline back ref, reference count will
2205 * be updated by remove_extent_backref
2208 BUG_ON(!found_extent
);
2210 btrfs_set_extent_refs(leaf
, ei
, refs
);
2211 btrfs_mark_buffer_dirty(leaf
);
2214 ret
= remove_extent_backref(trans
, extent_root
, path
,
2224 BUG_ON(is_data
&& refs_to_drop
!=
2225 extent_data_ref_count(root
, path
, iref
));
2227 BUG_ON(path
->slots
[0] != extent_slot
);
2229 BUG_ON(path
->slots
[0] != extent_slot
+ 1);
2230 path
->slots
[0] = extent_slot
;
2235 if (ops
&& ops
->free_extent
) {
2236 ret
= ops
->free_extent(root
, bytenr
, num_bytes
);
2244 ret
= pin_down_bytes(trans
, root
, bytenr
, num_bytes
,
2251 ret
= btrfs_del_items(trans
, extent_root
, path
, path
->slots
[0],
2254 btrfs_release_path(extent_root
, path
);
2257 ret
= btrfs_del_csums(trans
, root
, bytenr
, num_bytes
);
2261 ret
= update_block_group(trans
, root
, bytenr
, num_bytes
, 0,
2265 btrfs_free_path(path
);
2266 finish_current_insert(trans
, extent_root
);
2271 * find all the blocks marked as pending in the radix tree and remove
2272 * them from the extent map
2274 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
2275 btrfs_root
*extent_root
)
2282 struct extent_io_tree
*pending_del
;
2283 struct extent_io_tree
*extent_ins
;
2284 struct pending_extent_op
*extent_op
;
2286 extent_ins
= &extent_root
->fs_info
->extent_ins
;
2287 pending_del
= &extent_root
->fs_info
->pending_del
;
2290 ret
= find_first_extent_bit(pending_del
, 0, &start
, &end
,
2295 ret
= get_state_private(pending_del
, start
, &priv
);
2297 extent_op
= (struct pending_extent_op
*)(unsigned long)priv
;
2299 clear_extent_bits(pending_del
, start
, end
, EXTENT_LOCKED
,
2302 if (!test_range_bit(extent_ins
, start
, end
,
2303 EXTENT_LOCKED
, 0)) {
2304 ret
= __free_extent(trans
, extent_root
,
2305 start
, end
+ 1 - start
, 0,
2306 extent_root
->root_key
.objectid
,
2307 extent_op
->level
, 0, 1);
2311 ret
= get_state_private(extent_ins
, start
, &priv
);
2313 extent_op
= (struct pending_extent_op
*)
2314 (unsigned long)priv
;
2316 clear_extent_bits(extent_ins
, start
, end
,
2317 EXTENT_LOCKED
, GFP_NOFS
);
2319 if (extent_op
->type
== PENDING_BACKREF_UPDATE
)
2331 * remove an extent from the root, returns 0 on success
2334 int btrfs_free_extent(struct btrfs_trans_handle
*trans
,
2335 struct btrfs_root
*root
,
2336 u64 bytenr
, u64 num_bytes
, u64 parent
,
2337 u64 root_objectid
, u64 owner
, u64 offset
)
2339 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
2343 WARN_ON(num_bytes
< root
->sectorsize
);
2344 if (root
== extent_root
) {
2345 struct pending_extent_op
*extent_op
;
2347 extent_op
= kmalloc(sizeof(*extent_op
), GFP_NOFS
);
2350 extent_op
->type
= PENDING_EXTENT_DELETE
;
2351 extent_op
->bytenr
= bytenr
;
2352 extent_op
->num_bytes
= num_bytes
;
2353 extent_op
->level
= (int)owner
;
2355 set_extent_bits(&root
->fs_info
->pending_del
,
2356 bytenr
, bytenr
+ num_bytes
- 1,
2357 EXTENT_LOCKED
, GFP_NOFS
);
2358 set_state_private(&root
->fs_info
->pending_del
,
2359 bytenr
, (unsigned long)extent_op
);
2362 ret
= __free_extent(trans
, root
, bytenr
, num_bytes
, parent
,
2363 root_objectid
, owner
, offset
, 1);
2364 pending_ret
= del_pending_extents(trans
, root
->fs_info
->extent_root
);
2365 return ret
? ret
: pending_ret
;
2368 static u64
stripe_align(struct btrfs_root
*root
, u64 val
)
2370 u64 mask
= ((u64
)root
->stripesize
- 1);
2371 u64 ret
= (val
+ mask
) & ~mask
;
2376 * walks the btree of allocated extents and find a hole of a given size.
2377 * The key ins is changed to record the hole:
2378 * ins->objectid == block start
2379 * ins->flags = BTRFS_EXTENT_ITEM_KEY
2380 * ins->offset == number of blocks
2381 * Any available blocks before search_start are skipped.
2383 static int noinline
find_free_extent(struct btrfs_trans_handle
*trans
,
2384 struct btrfs_root
*orig_root
,
2385 u64 num_bytes
, u64 empty_size
,
2386 u64 search_start
, u64 search_end
,
2387 u64 hint_byte
, struct btrfs_key
*ins
,
2388 u64 exclude_start
, u64 exclude_nr
,
2392 u64 orig_search_start
= search_start
;
2393 struct btrfs_root
* root
= orig_root
->fs_info
->extent_root
;
2394 struct btrfs_fs_info
*info
= root
->fs_info
;
2395 u64 total_needed
= num_bytes
;
2396 struct btrfs_block_group_cache
*block_group
;
2400 WARN_ON(num_bytes
< root
->sectorsize
);
2401 btrfs_set_key_type(ins
, BTRFS_EXTENT_ITEM_KEY
);
2404 block_group
= btrfs_lookup_first_block_group(info
, hint_byte
);
2406 hint_byte
= search_start
;
2407 block_group
= btrfs_find_block_group(root
, block_group
,
2408 hint_byte
, data
, 1);
2410 block_group
= btrfs_find_block_group(root
,
2412 search_start
, data
, 1);
2415 total_needed
+= empty_size
;
2419 block_group
= btrfs_lookup_first_block_group(info
,
2422 block_group
= btrfs_lookup_first_block_group(info
,
2425 ret
= find_search_start(root
, &block_group
, &search_start
,
2426 total_needed
, data
);
2430 search_start
= stripe_align(root
, search_start
);
2431 ins
->objectid
= search_start
;
2432 ins
->offset
= num_bytes
;
2434 if (ins
->objectid
+ num_bytes
>
2435 block_group
->key
.objectid
+ block_group
->key
.offset
) {
2436 search_start
= block_group
->key
.objectid
+
2437 block_group
->key
.offset
;
2441 if (test_range_bit(&info
->extent_ins
, ins
->objectid
,
2442 ins
->objectid
+ num_bytes
-1, EXTENT_LOCKED
, 0)) {
2443 search_start
= ins
->objectid
+ num_bytes
;
2447 if (test_range_bit(&info
->pinned_extents
, ins
->objectid
,
2448 ins
->objectid
+ num_bytes
-1, EXTENT_DIRTY
, 0)) {
2449 search_start
= ins
->objectid
+ num_bytes
;
2453 if (exclude_nr
> 0 && (ins
->objectid
+ num_bytes
> exclude_start
&&
2454 ins
->objectid
< exclude_start
+ exclude_nr
)) {
2455 search_start
= exclude_start
+ exclude_nr
;
2459 if (!(data
& BTRFS_BLOCK_GROUP_DATA
)) {
2460 block_group
= btrfs_lookup_block_group(info
, ins
->objectid
);
2462 trans
->block_group
= block_group
;
2464 ins
->offset
= num_bytes
;
2468 block_group
= btrfs_lookup_first_block_group(info
, search_start
);
2470 search_start
= orig_search_start
;
2477 total_needed
-= empty_size
;
2483 block_group
= btrfs_find_block_group(root
, block_group
,
2484 search_start
, data
, 0);
2491 static int btrfs_reserve_extent(struct btrfs_trans_handle
*trans
,
2492 struct btrfs_root
*root
,
2493 u64 num_bytes
, u64 empty_size
,
2494 u64 hint_byte
, u64 search_end
,
2495 struct btrfs_key
*ins
, int data
)
2498 u64 search_start
= 0;
2500 struct btrfs_fs_info
*info
= root
->fs_info
;
2502 if (info
->extent_ops
) {
2503 struct btrfs_extent_ops
*ops
= info
->extent_ops
;
2504 ret
= ops
->alloc_extent(root
, num_bytes
, hint_byte
, ins
);
2510 alloc_profile
= info
->avail_data_alloc_bits
&
2511 info
->data_alloc_profile
;
2512 data
= BTRFS_BLOCK_GROUP_DATA
| alloc_profile
;
2513 } else if ((info
->system_allocs
> 0 || root
== info
->chunk_root
) &&
2514 info
->system_allocs
>= 0) {
2515 alloc_profile
= info
->avail_system_alloc_bits
&
2516 info
->system_alloc_profile
;
2517 data
= BTRFS_BLOCK_GROUP_SYSTEM
| alloc_profile
;
2519 alloc_profile
= info
->avail_metadata_alloc_bits
&
2520 info
->metadata_alloc_profile
;
2521 data
= BTRFS_BLOCK_GROUP_METADATA
| alloc_profile
;
2524 if (root
->ref_cows
) {
2525 if (!(data
& BTRFS_BLOCK_GROUP_METADATA
)) {
2526 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2528 BTRFS_BLOCK_GROUP_METADATA
);
2531 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2532 num_bytes
+ 2 * 1024 * 1024, data
);
2536 WARN_ON(num_bytes
< root
->sectorsize
);
2537 ret
= find_free_extent(trans
, root
, num_bytes
, empty_size
,
2538 search_start
, search_end
, hint_byte
, ins
,
2539 trans
->alloc_exclude_start
,
2540 trans
->alloc_exclude_nr
, data
);
2543 clear_extent_dirty(&root
->fs_info
->free_space_cache
,
2544 ins
->objectid
, ins
->objectid
+ ins
->offset
- 1,
2549 static int alloc_reserved_tree_block(struct btrfs_trans_handle
*trans
,
2550 struct btrfs_root
*root
,
2551 u64 root_objectid
, u64 generation
,
2552 u64 flags
, struct btrfs_disk_key
*key
,
2553 int level
, struct btrfs_key
*ins
)
2556 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2557 struct btrfs_extent_item
*extent_item
;
2558 struct btrfs_tree_block_info
*block_info
;
2559 struct btrfs_extent_inline_ref
*iref
;
2560 struct btrfs_path
*path
;
2561 struct extent_buffer
*leaf
;
2562 u32 size
= sizeof(*extent_item
) + sizeof(*block_info
) + sizeof(*iref
);
2564 path
= btrfs_alloc_path();
2567 path
->leave_spinning
= 1;
2568 ret
= btrfs_insert_empty_item(trans
, fs_info
->extent_root
, path
,
2572 leaf
= path
->nodes
[0];
2573 extent_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
2574 struct btrfs_extent_item
);
2575 btrfs_set_extent_refs(leaf
, extent_item
, 1);
2576 btrfs_set_extent_generation(leaf
, extent_item
, generation
);
2577 btrfs_set_extent_flags(leaf
, extent_item
,
2578 flags
| BTRFS_EXTENT_FLAG_TREE_BLOCK
);
2579 block_info
= (struct btrfs_tree_block_info
*)(extent_item
+ 1);
2581 btrfs_set_tree_block_key(leaf
, block_info
, key
);
2582 btrfs_set_tree_block_level(leaf
, block_info
, level
);
2584 iref
= (struct btrfs_extent_inline_ref
*)(block_info
+ 1);
2585 btrfs_set_extent_inline_ref_type(leaf
, iref
, BTRFS_TREE_BLOCK_REF_KEY
);
2586 btrfs_set_extent_inline_ref_offset(leaf
, iref
, root_objectid
);
2588 btrfs_mark_buffer_dirty(leaf
);
2589 btrfs_free_path(path
);
2591 ret
= update_block_group(trans
, root
, ins
->objectid
, ins
->offset
,
2594 printk(KERN_ERR
"btrfs update block group failed for %llu "
2595 "%llu\n", (unsigned long long)ins
->objectid
,
2596 (unsigned long long)ins
->offset
);
2602 static int alloc_tree_block(struct btrfs_trans_handle
*trans
,
2603 struct btrfs_root
*root
, u64 num_bytes
,
2604 u64 root_objectid
, u64 generation
,
2605 u64 flags
, struct btrfs_disk_key
*key
,
2606 int level
, u64 empty_size
, u64 hint_byte
,
2607 u64 search_end
, struct btrfs_key
*ins
)
2610 ret
= btrfs_reserve_extent(trans
, root
, num_bytes
, empty_size
,
2611 hint_byte
, search_end
, ins
, 0);
2614 if (root_objectid
== BTRFS_EXTENT_TREE_OBJECTID
) {
2615 struct pending_extent_op
*extent_op
;
2617 extent_op
= kmalloc(sizeof(*extent_op
), GFP_NOFS
);
2620 extent_op
->type
= PENDING_EXTENT_INSERT
;
2621 extent_op
->bytenr
= ins
->objectid
;
2622 extent_op
->num_bytes
= ins
->offset
;
2623 extent_op
->level
= level
;
2624 extent_op
->flags
= flags
;
2625 memcpy(&extent_op
->key
, key
, sizeof(*key
));
2627 set_extent_bits(&root
->fs_info
->extent_ins
, ins
->objectid
,
2628 ins
->objectid
+ ins
->offset
- 1,
2629 EXTENT_LOCKED
, GFP_NOFS
);
2630 set_state_private(&root
->fs_info
->extent_ins
,
2631 ins
->objectid
, (unsigned long)extent_op
);
2633 ret
= alloc_reserved_tree_block(trans
, root
, root_objectid
,
2636 finish_current_insert(trans
, root
->fs_info
->extent_root
);
2637 del_pending_extents(trans
, root
->fs_info
->extent_root
);
2643 * helper function to allocate a block for a given tree
2644 * returns the tree buffer or NULL.
2646 struct extent_buffer
*btrfs_alloc_free_block(struct btrfs_trans_handle
*trans
,
2647 struct btrfs_root
*root
,
2648 u32 blocksize
, u64 root_objectid
,
2649 struct btrfs_disk_key
*key
, int level
,
2650 u64 hint
, u64 empty_size
)
2652 struct btrfs_key ins
;
2654 struct extent_buffer
*buf
;
2656 ret
= alloc_tree_block(trans
, root
, blocksize
, root_objectid
,
2657 trans
->transid
, 0, key
, level
,
2658 empty_size
, hint
, (u64
)-1, &ins
);
2661 return ERR_PTR(ret
);
2664 buf
= btrfs_find_create_tree_block(root
, ins
.objectid
, blocksize
);
2666 btrfs_free_extent(trans
, root
, ins
.objectid
, ins
.offset
,
2667 0, root
->root_key
.objectid
, level
, 0);
2669 return ERR_PTR(-ENOMEM
);
2671 btrfs_set_buffer_uptodate(buf
);
2672 trans
->blocks_used
++;
/*
 * drop_leaf_ref: walks every item of a leaf and drops the extent reference
 * held by each regular (non-inline) file extent item.  Items that are not
 * EXTENT_DATA, are inline, or have a zero disk bytenr (holes) are skipped
 * (the `continue` statements were lost in extraction — TODO confirm).
 */
2679 static int noinline
drop_leaf_ref(struct btrfs_trans_handle
*trans
,
2680 struct btrfs_root
*root
,
2681 struct extent_buffer
*leaf
)
2684 u64 leaf_generation
;
2685 struct btrfs_key key
;
2686 struct btrfs_file_extent_item
*fi
;
/* Caller must hand us an actual leaf, never an internal node. */
2691 BUG_ON(!btrfs_is_leaf(leaf
));
2692 nritems
= btrfs_header_nritems(leaf
);
2693 leaf_owner
= btrfs_header_owner(leaf
);
2694 leaf_generation
= btrfs_header_generation(leaf
);
2696 for (i
= 0; i
< nritems
; i
++) {
2699 btrfs_item_key_to_cpu(leaf
, &key
, i
);
/* Only file extent items carry backrefs we need to drop here. */
2700 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
2702 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
/* Inline extents live inside the leaf itself — nothing external to free. */
2703 if (btrfs_file_extent_type(leaf
, fi
) ==
2704 BTRFS_FILE_EXTENT_INLINE
)
2707 * FIXME make sure to insert a trans record that
2708 * repeats the snapshot del on crash
2710 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
/* bytenr 0 denotes a hole — no backing extent to release. */
2711 if (disk_bytenr
== 0)
/* Drop one reference: parent is this leaf, owner/gen from leaf header. */
2713 ret
= btrfs_free_extent(trans
, root
, disk_bytenr
,
2714 btrfs_file_extent_disk_num_bytes(leaf
, fi
),
2715 leaf
->start
, leaf_owner
, leaf_generation
,
/*
 * reada_walk_down: best-effort readahead of child blocks of `node`, starting
 * at `slot`, to warm the cache before walk_down_tree reads them.  The loop
 * gives up after 32 "skipped" children (skip-counting lines are missing from
 * this extract); blocks within 32KiB of the last issued readahead are treated
 * as near-sequential per the visible distance test.  fs_mutex is dropped
 * around the actual readahead I/O and re-taken afterwards.
 */
2722 static void noinline
reada_walk_down(struct btrfs_root
*root
,
2723 struct extent_buffer
*node
,
2736 nritems
= btrfs_header_nritems(node
);
2737 level
= btrfs_header_level(node
);
2741 for (i
= slot
; i
< nritems
&& skipped
< 32; i
++) {
2742 bytenr
= btrfs_node_blockptr(node
, i
);
/* Skip blocks more than 32KiB away (either direction) from the last one. */
2743 if (last
&& ((bytenr
> last
&& bytenr
- last
> 32 * 1024) ||
2744 (last
> bytenr
&& last
- bytenr
> 32 * 1024))) {
2748 blocksize
= btrfs_level_size(root
, level
- 1);
/* NULL trans: read-only ref lookup — result check lost in extraction. */
2750 ret
= btrfs_lookup_extent_ref(NULL
, root
, bytenr
,
/* Drop fs_mutex while issuing I/O so other work can proceed. */
2758 mutex_unlock(&root
->fs_info
->fs_mutex
);
2759 ret
= readahead_tree_block(root
, bytenr
, blocksize
,
2760 btrfs_node_ptr_generation(node
, i
));
2761 last
= bytenr
+ blocksize
;
2763 mutex_lock(&root
->fs_info
->fs_mutex
);
/*
 * walk_down_tree: descends from path->nodes[*level] toward the leaves,
 * dropping extent references as it goes (helper for btrfs_drop_snapshot).
 * Visible behavior per iteration: if the current node is a leaf (or its
 * slot is exhausted) handle it and pop; otherwise look up the child's ref
 * count — children with extra refs are freed in place via btrfs_free_extent
 * and skipped, children we own are read in and descended into.  The branch
 * structure (ifs/continues/breaks between the numbered statements) was lost
 * in extraction, so control flow here is partly inferred — TODO confirm
 * against the original source.
 */
2770 * helper function for drop_snapshot, this walks down the tree dropping ref
2771 * counts as it goes.
2773 static int noinline
walk_down_tree(struct btrfs_trans_handle
*trans
,
2774 struct btrfs_root
*root
,
2775 struct btrfs_path
*path
, int *level
)
2781 struct extent_buffer
*next
;
2782 struct extent_buffer
*cur
;
2783 struct extent_buffer
*parent
;
2788 WARN_ON(*level
< 0);
2789 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
/* Reference count of the block we start from. */
2790 ret
= btrfs_lookup_extent_ref(trans
, root
,
2791 path
->nodes
[*level
]->start
,
2792 path
->nodes
[*level
]->len
, &refs
);
2798 * walk down to the last node level and free all the leaves
2800 while(*level
>= 0) {
2801 WARN_ON(*level
< 0);
2802 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
2803 cur
= path
->nodes
[*level
];
/* Sanity: the cached buffer must match the level we think we are at. */
2805 if (btrfs_header_level(cur
) != *level
)
/* Slot past the end of this node: nothing left here (pop handled below). */
2808 if (path
->slots
[*level
] >=
2809 btrfs_header_nritems(cur
))
/* Leaf case: drop the file-extent refs held by this leaf. */
2812 ret
= drop_leaf_ref(trans
, root
, cur
);
2816 bytenr
= btrfs_node_blockptr(cur
, path
->slots
[*level
]);
2817 ptr_gen
= btrfs_node_ptr_generation(cur
, path
->slots
[*level
]);
2818 blocksize
= btrfs_level_size(root
, *level
- 1);
/* How many references does the child still hold? (result use missing) */
2819 ret
= btrfs_lookup_extent_ref(trans
, root
, bytenr
, blocksize
,
/* Child is shared: just drop our ref and advance to the next slot. */
2823 parent
= path
->nodes
[*level
];
2824 root_owner
= btrfs_header_owner(parent
);
2825 root_gen
= btrfs_header_generation(parent
);
2826 path
->slots
[*level
]++;
2827 ret
= btrfs_free_extent(trans
, root
, bytenr
, blocksize
,
2828 parent
->start
, root_owner
,
2829 root_gen
, *level
- 1, 1);
/* Child is exclusively ours: read it and descend one level. */
2833 next
= btrfs_find_tree_block(root
, bytenr
, blocksize
);
2834 if (!next
|| !btrfs_buffer_uptodate(next
, ptr_gen
)) {
2835 free_extent_buffer(next
);
/* Warm the cache for the siblings we are about to visit. */
2836 reada_walk_down(root
, cur
, path
->slots
[*level
]);
/* Drop fs_mutex around the blocking read, as elsewhere in this file. */
2837 mutex_unlock(&root
->fs_info
->fs_mutex
);
2838 next
= read_tree_block(root
, bytenr
, blocksize
,
2840 mutex_lock(&root
->fs_info
->fs_mutex
);
2842 WARN_ON(*level
<= 0);
/* Install the child in the path, releasing any stale buffer there. */
2843 if (path
->nodes
[*level
-1])
2844 free_extent_buffer(path
->nodes
[*level
-1]);
2845 path
->nodes
[*level
-1] = next
;
2846 *level
= btrfs_header_level(next
);
2847 path
->slots
[*level
] = 0;
/* Pop path: free the block we just finished and return to the caller. */
2850 WARN_ON(*level
< 0);
2851 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
/* Owner/parent differ for the tree root vs. an interior node. */
2853 if (path
->nodes
[*level
] == root
->node
) {
2854 root_owner
= root
->root_key
.objectid
;
2855 parent
= path
->nodes
[*level
];
2857 parent
= path
->nodes
[*level
+ 1];
2858 root_owner
= btrfs_header_owner(parent
);
2861 root_gen
= btrfs_header_generation(parent
);
2862 ret
= btrfs_free_extent(trans
, root
, path
->nodes
[*level
]->start
,
2863 path
->nodes
[*level
]->len
, parent
->start
,
2864 root_owner
, root_gen
, *level
, 1);
2865 free_extent_buffer(path
->nodes
[*level
]);
2866 path
->nodes
[*level
] = NULL
;
/*
 * walk_up_tree: walks back up the path looking for the first ancestor with
 * unvisited slots.  If one is found, the resume point (next key + level) is
 * recorded in root_item->drop_progress / drop_level so a later transaction
 * can continue the snapshot drop; otherwise each exhausted node is freed via
 * btrfs_free_extent and removed from the path.  Several connecting control
 * statements are missing from this extract — TODO confirm against original.
 */
2873 * helper for dropping snapshots. This walks back up the tree in the path
2874 * to find the first node higher up where we haven't yet gone through
2877 static int noinline
walk_up_tree(struct btrfs_trans_handle
*trans
,
2878 struct btrfs_root
*root
,
2879 struct btrfs_path
*path
, int *level
)
2883 struct btrfs_root_item
*root_item
= &root
->root_item
;
2888 for(i
= *level
; i
< BTRFS_MAX_LEVEL
- 1 && path
->nodes
[i
]; i
++) {
2889 slot
= path
->slots
[i
];
/* More slots remain at level i: save progress and stop climbing. */
2890 if (slot
< btrfs_header_nritems(path
->nodes
[i
]) - 1) {
2891 struct extent_buffer
*node
;
2892 struct btrfs_disk_key disk_key
;
2893 node
= path
->nodes
[i
];
2896 WARN_ON(*level
== 0);
/* Persist the key of the next unvisited child as the resume point. */
2897 btrfs_node_key(node
, &disk_key
, path
->slots
[i
]);
2898 memcpy(&root_item
->drop_progress
,
2899 &disk_key
, sizeof(disk_key
));
2900 root_item
->drop_level
= i
;
/* Exhausted node: drop its extent ref and release the buffer. */
2903 struct extent_buffer
*parent
;
2904 if (path
->nodes
[*level
] == root
->node
)
2905 parent
= path
->nodes
[*level
];
2907 parent
= path
->nodes
[*level
+ 1];
2909 root_owner
= btrfs_header_owner(parent
);
2910 root_gen
= btrfs_header_generation(parent
);
2911 ret
= btrfs_free_extent(trans
, root
,
2912 path
->nodes
[*level
]->start
,
2913 path
->nodes
[*level
]->len
,
2914 parent
->start
, root_owner
,
2915 root_gen
, *level
, 1);
2917 free_extent_buffer(path
->nodes
[*level
]);
2918 path
->nodes
[*level
] = NULL
;
/*
 * btrfs_drop_snapshot: top-level driver that frees an entire snapshot tree.
 * Either starts at the tree root (drop_progress key == 0) or resumes from
 * the saved drop_progress/drop_level recorded by walk_up_tree on an earlier
 * run, then alternates walk_down_tree / walk_up_tree (loop structure lost in
 * extraction) and finally releases all path buffers.  Caller-visible signature
 * continues beyond the visible fragment (second parameter line truncated).
 */
2926 * drop the reference count on the tree rooted at 'snap'. This traverses
2927 * the tree freeing any blocks that have a ref count of zero after being
2930 int btrfs_drop_snapshot(struct btrfs_trans_handle
*trans
, struct btrfs_root
2936 struct btrfs_path
*path
;
2939 struct btrfs_root_item
*root_item
= &root
->root_item
;
2941 path
= btrfs_alloc_path();
2944 level
= btrfs_header_level(root
->node
);
/* Fresh drop: start from the root node itself. */
2946 if (btrfs_disk_key_objectid(&root_item
->drop_progress
) == 0) {
2947 path
->nodes
[level
] = root
->node
;
/* Pin the root buffer while it sits in the path. */
2948 extent_buffer_get(root
->node
);
2949 path
->slots
[level
] = 0;
/* Resume: re-search down to the key saved in drop_progress. */
2951 struct btrfs_key key
;
2952 struct btrfs_disk_key found_key
;
2953 struct extent_buffer
*node
;
2955 btrfs_disk_key_to_cpu(&key
, &root_item
->drop_progress
);
2956 level
= root_item
->drop_level
;
/* Stop the search at the level we previously stopped at. */
2957 path
->lowest_level
= level
;
2958 wret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2963 node
= path
->nodes
[level
];
2964 btrfs_node_key(node
, &found_key
, path
->slots
[level
]);
/* The key found must match the recorded resume point exactly. */
2965 WARN_ON(memcmp(&found_key
, &root_item
->drop_progress
,
2966 sizeof(found_key
)));
/* Main drop loop body (loop header missing from extract). */
2969 wret
= walk_down_tree(trans
, root
, path
, &level
);
2975 wret
= walk_up_tree(trans
, root
, path
, &level
);
/* Release every buffer still referenced by the path. */
2985 for (i
= 0; i
<= orig_level
; i
++) {
2986 if (path
->nodes
[i
]) {
2987 free_extent_buffer(path
->nodes
[i
]);
2988 path
->nodes
[i
] = NULL
;
2992 btrfs_free_path(path
);
/*
 * btrfs_free_block_groups: teardown at unmount time.  First loop (header
 * missing) drains the block_group_cache extent tree: for each range, the
 * private pointer is the kmalloc'd block group struct, which is kfree'd
 * before the range bits are cleared.  Second loop clears EXTENT_DIRTY
 * ranges out of the free_space_cache tree.
 */
2998 int btrfs_free_block_groups(struct btrfs_fs_info
*info
)
/* (unsigned int)-1 matches any bit set on the range. */
3005 ret
= find_first_extent_bit(&info
->block_group_cache
, 0,
3006 &start
, &end
, (unsigned int)-1);
/* The stashed private is the block group cache struct for this range. */
3009 ret
= get_state_private(&info
->block_group_cache
, start
, &ptr
);
3011 kfree((void *)(unsigned long)ptr
);
3012 clear_extent_bits(&info
->block_group_cache
, start
,
3013 end
, (unsigned int)-1, GFP_NOFS
);
3016 ret
= find_first_extent_bit(&info
->free_space_cache
, 0,
3017 &start
, &end
, EXTENT_DIRTY
);
3020 clear_extent_dirty(&info
->free_space_cache
, start
,
/*
 * find_first_block_group: positions `path` at the first BLOCK_GROUP_ITEM
 * whose objectid is >= key->objectid.  Read-only search (NULL trans, no
 * cow); advances leaf by leaf via btrfs_next_leaf when a leaf is exhausted
 * (the surrounding loop/return statements are missing from this extract).
 */
3026 int find_first_block_group(struct btrfs_root
*root
, struct btrfs_path
*path
,
3027 struct btrfs_key
*key
)
3030 struct btrfs_key found_key
;
3031 struct extent_buffer
*leaf
;
3034 ret
= btrfs_search_slot(NULL
, root
, key
, path
, 0, 0);
3038 slot
= path
->slots
[0];
3039 leaf
= path
->nodes
[0];
/* Ran off the end of this leaf — step to the next one. */
3040 if (slot
>= btrfs_header_nritems(leaf
)) {
3041 ret
= btrfs_next_leaf(root
, path
);
3048 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
/* Success condition: a block group item at/after the requested objectid. */
3050 if (found_key
.objectid
>= key
->objectid
&&
3051 found_key
.type
== BTRFS_BLOCK_GROUP_ITEM_KEY
)
/*
 * btrfs_read_block_groups: at mount time, iterates every BLOCK_GROUP_ITEM
 * in the extent tree and builds an in-memory btrfs_block_group_cache for
 * each, recording it in the fs_info->block_group_cache extent_io_tree with
 * a per-type state bit and the struct pointer stashed as the range private.
 * The enclosing iteration loop is missing from this extract.
 */
3060 int btrfs_read_block_groups(struct btrfs_root
*root
)
3062 struct btrfs_path
*path
;
3065 struct btrfs_block_group_cache
*cache
;
3066 struct btrfs_fs_info
*info
= root
->fs_info
;
3067 struct btrfs_space_info
*space_info
;
3068 struct extent_io_tree
*block_group_cache
;
3069 struct btrfs_key key
;
3070 struct btrfs_key found_key
;
3071 struct extent_buffer
*leaf
;
3073 block_group_cache
= &info
->block_group_cache
;
/* Block group items live in the extent tree, not the fs tree passed in. */
3075 root
= info
->extent_root
;
3078 btrfs_set_key_type(&key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
3079 path
= btrfs_alloc_path();
3084 ret
= find_first_block_group(root
, path
, &key
);
3092 leaf
= path
->nodes
[0];
3093 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
/* kzalloc: cache starts zeroed; NULL check missing from extract. */
3094 cache
= kzalloc(sizeof(*cache
), GFP_NOFS
);
/* Copy the on-disk block group item into the in-memory cache struct. */
3100 read_extent_buffer(leaf
, &cache
->item
,
3101 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
3102 sizeof(cache
->item
));
3103 memcpy(&cache
->key
, &found_key
, sizeof(found_key
));
/* Advance the search key past this group: objectid + offset (= start+len). */
3106 key
.objectid
= found_key
.objectid
+ found_key
.offset
;
3107 btrfs_release_path(root
, path
);
3108 cache
->flags
= btrfs_block_group_flags(&cache
->item
);
/* Map on-disk type flags to the local extent-state bit for this cache. */
3110 if (cache
->flags
& BTRFS_BLOCK_GROUP_DATA
) {
3111 bit
= BLOCK_GROUP_DATA
;
3112 } else if (cache
->flags
& BTRFS_BLOCK_GROUP_SYSTEM
) {
3113 bit
= BLOCK_GROUP_SYSTEM
;
3114 } else if (cache
->flags
& BTRFS_BLOCK_GROUP_METADATA
) {
3115 bit
= BLOCK_GROUP_METADATA
;
3117 set_avail_alloc_bits(info
, cache
->flags
);
/* Read-only chunks presumably mark the group read-only — body missing. */
3118 if (btrfs_chunk_readonly(root
, cache
->key
.objectid
))
3121 ret
= update_space_info(info
, cache
->flags
, found_key
.offset
,
3122 btrfs_block_group_used(&cache
->item
),
3125 cache
->space_info
= space_info
;
3127 /* use EXTENT_LOCKED to prevent merging */
3128 set_extent_bits(block_group_cache
, found_key
.objectid
,
3129 found_key
.objectid
+ found_key
.offset
- 1,
3130 bit
| EXTENT_LOCKED
, GFP_NOFS
);
/* Stash the cache struct so lookups by bytenr can recover it. */
3131 set_state_private(block_group_cache
, found_key
.objectid
,
3132 (unsigned long)cache
);
3136 btrfs_free_path(path
);
/*
 * btrfs_make_block_group: creates a single new block group covering
 * [chunk_offset, chunk_offset + size): builds the in-memory cache struct,
 * registers it with the space-info accounting and the block_group_cache
 * extent tree, inserts the BLOCK_GROUP_ITEM into the extent tree, and
 * flushes any pending extent-tree inserts/deletes that generates.
 * (`size` comes from a parameter line missing from this extract.)
 */
3140 int btrfs_make_block_group(struct btrfs_trans_handle
*trans
,
3141 struct btrfs_root
*root
, u64 bytes_used
,
3142 u64 type
, u64 chunk_objectid
, u64 chunk_offset
,
3147 struct btrfs_root
*extent_root
;
3148 struct btrfs_block_group_cache
*cache
;
3149 struct extent_io_tree
*block_group_cache
;
3151 extent_root
= root
->fs_info
->extent_root
;
3152 block_group_cache
= &root
->fs_info
->block_group_cache
;
/* kzalloc: zeroed cache; the NULL check was lost in extraction. */
3154 cache
= kzalloc(sizeof(*cache
), GFP_NOFS
);
/* Key convention: objectid = group start bytenr, offset = group length. */
3156 cache
->key
.objectid
= chunk_offset
;
3157 cache
->key
.offset
= size
;
3159 btrfs_set_key_type(&cache
->key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
3160 btrfs_set_block_group_used(&cache
->item
, bytes_used
);
3161 btrfs_set_block_group_chunk_objectid(&cache
->item
, chunk_objectid
);
3162 cache
->flags
= type
;
3163 btrfs_set_block_group_flags(&cache
->item
, type
);
3165 ret
= update_space_info(root
->fs_info
, cache
->flags
, size
, bytes_used
,
3166 &cache
->space_info
);
3169 bit
= block_group_state_bits(type
);
/* EXTENT_LOCKED keeps adjacent group ranges from being merged. */
3170 set_extent_bits(block_group_cache
, chunk_offset
,
3171 chunk_offset
+ size
- 1,
3172 bit
| EXTENT_LOCKED
, GFP_NOFS
);
3174 set_state_private(block_group_cache
, chunk_offset
,
3175 (unsigned long)cache
);
/* Persist the new group in the extent tree. */
3176 ret
= btrfs_insert_item(trans
, extent_root
, &cache
->key
, &cache
->item
,
3177 sizeof(cache
->item
));
/* The insert may queue pending extent ops on the extent root — flush them. */
3180 finish_current_insert(trans
, extent_root
);
3181 ret
= del_pending_extents(trans
, extent_root
);
3183 set_avail_alloc_bits(extent_root
->fs_info
, type
);
/*
 * btrfs_make_block_groups (converter-only): lays out block groups across the
 * whole device in two passes.  Pass 1 carves the byte range [0, total_bytes)
 * into groups — the first group is SYSTEM (8..32 MiB), later groups alternate
 * METADATA (<=1 GiB) / DATA (<=5 GiB) keeping data >= 2x metadata — and
 * registers each in the in-memory block_group_cache.  Pass 2 re-walks the
 * groups and inserts their BLOCK_GROUP_ITEMs into the extent tree.  Both
 * passes run before normal allocation is possible, which is why the cache
 * entries must exist before any insert.
 */
3188 * This is for converter use only.
3190 * In that case, we don't know where are free blocks located.
3191 * Therefore all block group cache entries must be setup properly
3192 * before doing any block allocation.
3194 int btrfs_make_block_groups(struct btrfs_trans_handle
*trans
,
3195 struct btrfs_root
*root
)
3203 u64 total_metadata
= 0;
3207 struct btrfs_root
*extent_root
;
3208 struct btrfs_block_group_cache
*cache
;
3209 struct extent_io_tree
*block_group_cache
;
3211 extent_root
= root
->fs_info
->extent_root
;
3212 block_group_cache
= &root
->fs_info
->block_group_cache
;
3213 chunk_objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
3214 total_bytes
= btrfs_super_total_bytes(&root
->fs_info
->super_copy
);
/* Align group boundaries to 64 sectors. */
3215 group_align
= 64 * root
->sectorsize
;
/* Pass 1: carve the device into groups and build the in-memory cache. */
3218 while (cur_start
< total_bytes
) {
/* Nominal group size: 1/12 of the device, clamped to what's left. */
3219 group_size
= total_bytes
/ 12;
3220 group_size
= min_t(u64
, group_size
, total_bytes
- cur_start
);
/* The very first group is the SYSTEM group, 8..32 MiB, aligned. */
3221 if (cur_start
== 0) {
3222 bit
= BLOCK_GROUP_SYSTEM
;
3223 group_type
= BTRFS_BLOCK_GROUP_SYSTEM
;
3225 group_size
&= ~(group_align
- 1);
3226 group_size
= max_t(u64
, group_size
, 8 * 1024 * 1024);
3227 group_size
= min_t(u64
, group_size
, 32 * 1024 * 1024);
3229 group_size
&= ~(group_align
- 1);
/* Keep roughly a 2:1 data:metadata split across the device. */
3230 if (total_data
>= total_metadata
* 2) {
3231 group_type
= BTRFS_BLOCK_GROUP_METADATA
;
3232 group_size
= min_t(u64
, group_size
,
3233 1ULL * 1024 * 1024 * 1024);
3234 total_metadata
+= group_size
;
3236 group_type
= BTRFS_BLOCK_GROUP_DATA
;
3237 group_size
= min_t(u64
, group_size
,
3238 5ULL * 1024 * 1024 * 1024);
3239 total_data
+= group_size
;
/* If <125% of a group remains, absorb the tail into this group. */
3241 if ((total_bytes
- cur_start
) * 4 < group_size
* 5)
3242 group_size
= total_bytes
- cur_start
;
/* kzalloc: zeroed cache; NULL check missing from this extract. */
3245 cache
= kzalloc(sizeof(*cache
), GFP_NOFS
);
3248 cache
->key
.objectid
= cur_start
;
3249 cache
->key
.offset
= group_size
;
3250 btrfs_set_key_type(&cache
->key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
3252 btrfs_set_block_group_used(&cache
->item
, 0);
3253 btrfs_set_block_group_chunk_objectid(&cache
->item
,
3255 btrfs_set_block_group_flags(&cache
->item
, group_type
);
3257 cache
->flags
= group_type
;
3259 ret
= update_space_info(root
->fs_info
, group_type
, group_size
,
3260 0, &cache
->space_info
);
3262 set_avail_alloc_bits(extent_root
->fs_info
, group_type
);
/* EXTENT_LOCKED prevents adjacent group ranges from merging. */
3264 set_extent_bits(block_group_cache
, cur_start
,
3265 cur_start
+ group_size
- 1,
3266 bit
| EXTENT_LOCKED
, GFP_NOFS
);
3267 set_state_private(block_group_cache
, cur_start
,
3268 (unsigned long)cache
);
3269 cur_start
+= group_size
;
3271 /* then insert all the items */
/* Pass 2: now that the caches exist, persist each group item. */
3273 while(cur_start
< total_bytes
) {
3274 cache
= btrfs_lookup_block_group(root
->fs_info
, cur_start
);
3277 ret
= btrfs_insert_item(trans
, extent_root
, &cache
->key
, &cache
->item
,
3278 sizeof(cache
->item
));
/* Flush pending extent-tree work generated by the insert. */
3281 finish_current_insert(trans
, extent_root
);
3282 ret
= del_pending_extents(trans
, extent_root
);
3285 cur_start
= cache
->key
.objectid
+ cache
->key
.offset
;
3290 int btrfs_update_block_group(struct btrfs_trans_handle
*trans
,
3291 struct btrfs_root
*root
,
3292 u64 bytenr
, u64 num_bytes
, int alloc
,
3295 return update_block_group(trans
, root
, bytenr
, num_bytes
,