2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02110-1301, USA.
21 #include "kerncompat.h"
22 #include "radix-tree.h"
25 #include "print-tree.h"
26 #include "transaction.h"
30 #define BLOCK_GROUP_DATA EXTENT_WRITEBACK
31 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
32 #define BLOCK_GROUP_SYSTEM EXTENT_NEW
34 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
36 static int finish_current_insert(struct btrfs_trans_handle
*trans
, struct
37 btrfs_root
*extent_root
);
38 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
39 btrfs_root
*extent_root
);
40 int btrfs_make_block_group(struct btrfs_trans_handle
*trans
,
41 struct btrfs_root
*root
, u64 bytes_used
,
42 u64 type
, u64 chunk_tree
, u64 chunk_objectid
,
45 static int cache_block_group(struct btrfs_root
*root
,
46 struct btrfs_block_group_cache
*block_group
)
48 struct btrfs_path
*path
;
51 struct extent_buffer
*leaf
;
52 struct extent_io_tree
*free_space_cache
;
62 root
= root
->fs_info
->extent_root
;
63 free_space_cache
= &root
->fs_info
->free_space_cache
;
65 if (block_group
->cached
)
68 path
= btrfs_alloc_path();
73 first_free
= block_group
->key
.objectid
;
74 key
.objectid
= block_group
->key
.objectid
;
76 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
77 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
80 ret
= btrfs_previous_item(root
, path
, 0, BTRFS_EXTENT_ITEM_KEY
);
84 leaf
= path
->nodes
[0];
85 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
86 if (key
.objectid
+ key
.offset
> first_free
)
87 first_free
= key
.objectid
+ key
.offset
;
90 leaf
= path
->nodes
[0];
91 slot
= path
->slots
[0];
92 if (slot
>= btrfs_header_nritems(leaf
)) {
93 ret
= btrfs_next_leaf(root
, path
);
102 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
103 if (key
.objectid
< block_group
->key
.objectid
) {
106 if (key
.objectid
>= block_group
->key
.objectid
+
107 block_group
->key
.offset
) {
111 if (btrfs_key_type(&key
) == BTRFS_EXTENT_ITEM_KEY
) {
116 if (key
.objectid
> last
) {
117 hole_size
= key
.objectid
- last
;
118 set_extent_dirty(free_space_cache
, last
,
119 last
+ hole_size
- 1,
122 last
= key
.objectid
+ key
.offset
;
130 if (block_group
->key
.objectid
+
131 block_group
->key
.offset
> last
) {
132 hole_size
= block_group
->key
.objectid
+
133 block_group
->key
.offset
- last
;
134 set_extent_dirty(free_space_cache
, last
,
135 last
+ hole_size
- 1, GFP_NOFS
);
137 block_group
->cached
= 1;
139 btrfs_free_path(path
);
143 struct btrfs_block_group_cache
*btrfs_lookup_block_group(struct
147 struct extent_io_tree
*block_group_cache
;
148 struct btrfs_block_group_cache
*block_group
= NULL
;
154 block_group_cache
= &info
->block_group_cache
;
155 ret
= find_first_extent_bit(block_group_cache
,
156 bytenr
, &start
, &end
,
157 BLOCK_GROUP_DATA
| BLOCK_GROUP_METADATA
|
162 ret
= get_state_private(block_group_cache
, start
, &ptr
);
166 block_group
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
167 if (block_group
->key
.objectid
<= bytenr
&& bytenr
<
168 block_group
->key
.objectid
+ block_group
->key
.offset
)
173 static int block_group_bits(struct btrfs_block_group_cache
*cache
, u64 bits
)
175 return (cache
->flags
& bits
);
178 static int noinline
find_search_start(struct btrfs_root
*root
,
179 struct btrfs_block_group_cache
**cache_ret
,
180 u64
*start_ret
, int num
, int data
)
183 struct btrfs_block_group_cache
*cache
= *cache_ret
;
188 u64 search_start
= *start_ret
;
195 ret
= cache_block_group(root
, cache
);
199 last
= max(search_start
, cache
->key
.objectid
);
200 if (!block_group_bits(cache
, data
)) {
205 ret
= find_first_extent_bit(&root
->fs_info
->free_space_cache
,
206 last
, &start
, &end
, EXTENT_DIRTY
);
213 start
= max(last
, start
);
215 if (last
- start
< num
) {
216 if (last
== cache
->key
.objectid
+ cache
->key
.offset
)
220 if (start
+ num
> cache
->key
.objectid
+ cache
->key
.offset
)
226 cache
= btrfs_lookup_block_group(root
->fs_info
, search_start
);
228 printk("Unable to find block group for %llu\n",
229 (unsigned long long)search_start
);
235 last
= cache
->key
.objectid
+ cache
->key
.offset
;
237 cache
= btrfs_lookup_block_group(root
->fs_info
, last
);
247 if (cache_miss
&& !cache
->cached
) {
248 cache_block_group(root
, cache
);
250 cache
= btrfs_lookup_block_group(root
->fs_info
, last
);
252 cache
= btrfs_find_block_group(root
, cache
, last
, data
, 0);
260 static u64
div_factor(u64 num
, int factor
)
269 static int block_group_state_bits(u64 flags
)
272 if (flags
& BTRFS_BLOCK_GROUP_DATA
)
273 bits
|= BLOCK_GROUP_DATA
;
274 if (flags
& BTRFS_BLOCK_GROUP_METADATA
)
275 bits
|= BLOCK_GROUP_METADATA
;
276 if (flags
& BTRFS_BLOCK_GROUP_SYSTEM
)
277 bits
|= BLOCK_GROUP_SYSTEM
;
281 struct btrfs_block_group_cache
*btrfs_find_block_group(struct btrfs_root
*root
,
282 struct btrfs_block_group_cache
283 *hint
, u64 search_start
,
286 struct btrfs_block_group_cache
*cache
;
287 struct extent_io_tree
*block_group_cache
;
288 struct btrfs_block_group_cache
*found_group
= NULL
;
289 struct btrfs_fs_info
*info
= root
->fs_info
;
302 block_group_cache
= &info
->block_group_cache
;
307 bit
= block_group_state_bits(data
);
310 struct btrfs_block_group_cache
*shint
;
311 shint
= btrfs_lookup_block_group(info
, search_start
);
312 if (shint
&& block_group_bits(shint
, data
)) {
313 used
= btrfs_block_group_used(&shint
->item
);
314 if (used
+ shint
->pinned
<
315 div_factor(shint
->key
.offset
, factor
)) {
320 if (hint
&& block_group_bits(hint
, data
)) {
321 used
= btrfs_block_group_used(&hint
->item
);
322 if (used
+ hint
->pinned
<
323 div_factor(hint
->key
.offset
, factor
)) {
326 last
= hint
->key
.objectid
+ hint
->key
.offset
;
330 hint_last
= max(hint
->key
.objectid
, search_start
);
332 hint_last
= search_start
;
338 ret
= find_first_extent_bit(block_group_cache
, last
,
343 ret
= get_state_private(block_group_cache
, start
, &ptr
);
347 cache
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
348 last
= cache
->key
.objectid
+ cache
->key
.offset
;
349 used
= btrfs_block_group_used(&cache
->item
);
352 free_check
= cache
->key
.offset
;
354 free_check
= div_factor(cache
->key
.offset
, factor
);
356 if (used
+ cache
->pinned
< free_check
) {
371 static u64
hash_extent_ref(u64 root_objectid
, u64 ref_generation
,
372 u64 owner
, u64 owner_offset
)
374 u32 high_crc
= ~(u32
)0;
375 u32 low_crc
= ~(u32
)0;
378 lenum
= cpu_to_le64(root_objectid
);
379 high_crc
= crc32c(high_crc
, &lenum
, sizeof(lenum
));
380 lenum
= cpu_to_le64(ref_generation
);
381 low_crc
= crc32c(low_crc
, &lenum
, sizeof(lenum
));
382 if (owner
>= BTRFS_FIRST_FREE_OBJECTID
) {
383 lenum
= cpu_to_le64(owner
);
384 low_crc
= crc32c(low_crc
, &lenum
, sizeof(lenum
));
385 lenum
= cpu_to_le64(owner_offset
);
386 low_crc
= crc32c(low_crc
, &lenum
, sizeof(lenum
));
388 return ((u64
)high_crc
<< 32) | (u64
)low_crc
;
391 static int match_extent_ref(struct extent_buffer
*leaf
,
392 struct btrfs_extent_ref
*disk_ref
,
393 struct btrfs_extent_ref
*cpu_ref
)
398 if (cpu_ref
->objectid
)
399 len
= sizeof(*cpu_ref
);
401 len
= 2 * sizeof(u64
);
402 ret
= memcmp_extent_buffer(leaf
, cpu_ref
, (unsigned long)disk_ref
,
407 static int noinline
lookup_extent_backref(struct btrfs_trans_handle
*trans
,
408 struct btrfs_root
*root
,
409 struct btrfs_path
*path
, u64 bytenr
,
411 u64 ref_generation
, u64 owner
,
412 u64 owner_offset
, int del
)
415 struct btrfs_key key
;
416 struct btrfs_key found_key
;
417 struct btrfs_extent_ref ref
;
418 struct extent_buffer
*leaf
;
419 struct btrfs_extent_ref
*disk_ref
;
423 btrfs_set_stack_ref_root(&ref
, root_objectid
);
424 btrfs_set_stack_ref_generation(&ref
, ref_generation
);
425 btrfs_set_stack_ref_objectid(&ref
, owner
);
426 btrfs_set_stack_ref_offset(&ref
, owner_offset
);
428 hash
= hash_extent_ref(root_objectid
, ref_generation
, owner
,
431 key
.objectid
= bytenr
;
432 key
.type
= BTRFS_EXTENT_REF_KEY
;
435 ret
= btrfs_search_slot(trans
, root
, &key
, path
,
439 leaf
= path
->nodes
[0];
441 u32 nritems
= btrfs_header_nritems(leaf
);
442 if (path
->slots
[0] >= nritems
) {
443 ret2
= btrfs_next_leaf(root
, path
);
446 leaf
= path
->nodes
[0];
448 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
449 if (found_key
.objectid
!= bytenr
||
450 found_key
.type
!= BTRFS_EXTENT_REF_KEY
)
452 key
.offset
= found_key
.offset
;
454 btrfs_release_path(root
, path
);
458 disk_ref
= btrfs_item_ptr(path
->nodes
[0],
460 struct btrfs_extent_ref
);
461 if (match_extent_ref(path
->nodes
[0], disk_ref
, &ref
)) {
465 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
466 key
.offset
= found_key
.offset
+ 1;
467 btrfs_release_path(root
, path
);
474 * Back reference rules. Back refs have three main goals:
476 * 1) differentiate between all holders of references to an extent so that
477 * when a reference is dropped we can make sure it was a valid reference
478 * before freeing the extent.
480 * 2) Provide enough information to quickly find the holders of an extent
481 * if we notice a given block is corrupted or bad.
483 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
484 * maintenance. This is actually the same as #2, but with a slightly
485 * different use case.
487 * File extents can be referenced by:
489 * - multiple snapshots, subvolumes, or different generations in one subvol
490 * - different files inside a single subvolume (in theory, not implemented yet)
491 * - different offsets inside a file (bookend extents in file.c)
493 * The extent ref structure has fields for:
495 * - Objectid of the subvolume root
496 * - Generation number of the tree holding the reference
497 * - objectid of the file holding the reference
498 * - offset in the file corresponding to the key holding the reference
500 * When a file extent is allocated the fields are filled in:
501 * (root_key.objectid, trans->transid, inode objectid, offset in file)
503 * When a leaf is cow'd new references are added for every file extent found
504 * in the leaf. It looks the same as the create case, but trans->transid
505 * will be different when the block is cow'd.
507 * (root_key.objectid, trans->transid, inode objectid, offset in file)
509 * When a file extent is removed either during snapshot deletion or file
510 * truncation, the corresponding back reference is found
513 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
514 * inode objectid, offset in file)
516 * Btree extents can be referenced by:
518 * - Different subvolumes
519 * - Different generations of the same subvolume
521 * Storing sufficient information for a full reverse mapping of a btree
522 * block would require storing the lowest key of the block in the backref,
523 * and it would require updating that lowest key either before write out or
524 * every time it changed. Instead, the objectid of the lowest key is stored
525 * along with the level of the tree block. This provides a hint
526 * about where in the btree the block can be found. Searches through the
527 * btree only need to look for a pointer to that block, so they stop one
528 * level higher than the level recorded in the backref.
530 * Some btrees do not do reference counting on their extents. These
531 * include the extent tree and the tree of tree roots. Backrefs for these
532 * trees always have a generation of zero.
534 * When a tree block is created, back references are inserted:
536 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
538 * When a tree block is cow'd in a reference counted root,
539 * new back references are added for all the blocks it points to.
540 * These are of the form (trans->transid will have increased since creation):
542 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
544 * Because the lowest_key_objectid and the level are just hints
545 * they are not used when backrefs are deleted. When a backref is deleted:
547 * if backref was for a tree root:
548 * root_objectid = root->root_key.objectid
550 * root_objectid = btrfs_header_owner(parent)
552 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
554 * Back Reference Key hashing:
556 * Back references have four fields, each 64 bits long. Unfortunately,
557 * this is hashed into a single 64 bit number and placed into the key offset.
558 * The key objectid corresponds to the first byte in the extent, and the
559 * key type is set to BTRFS_EXTENT_REF_KEY
561 int btrfs_insert_extent_backref(struct btrfs_trans_handle
*trans
,
562 struct btrfs_root
*root
,
563 struct btrfs_path
*path
, u64 bytenr
,
564 u64 root_objectid
, u64 ref_generation
,
565 u64 owner
, u64 owner_offset
)
568 struct btrfs_key key
;
569 struct btrfs_extent_ref ref
;
570 struct btrfs_extent_ref
*disk_ref
;
573 btrfs_set_stack_ref_root(&ref
, root_objectid
);
574 btrfs_set_stack_ref_generation(&ref
, ref_generation
);
575 btrfs_set_stack_ref_objectid(&ref
, owner
);
576 btrfs_set_stack_ref_offset(&ref
, owner_offset
);
578 hash
= hash_extent_ref(root_objectid
, ref_generation
, owner
,
581 key
.objectid
= bytenr
;
582 key
.type
= BTRFS_EXTENT_REF_KEY
;
584 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
, sizeof(ref
));
585 while (ret
== -EEXIST
) {
586 disk_ref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
587 struct btrfs_extent_ref
);
588 if (match_extent_ref(path
->nodes
[0], disk_ref
, &ref
))
591 btrfs_release_path(root
, path
);
592 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
597 disk_ref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
598 struct btrfs_extent_ref
);
599 write_extent_buffer(path
->nodes
[0], &ref
, (unsigned long)disk_ref
,
601 btrfs_mark_buffer_dirty(path
->nodes
[0]);
603 btrfs_release_path(root
, path
);
607 int btrfs_inc_extent_ref(struct btrfs_trans_handle
*trans
,
608 struct btrfs_root
*root
,
609 u64 bytenr
, u64 num_bytes
,
610 u64 root_objectid
, u64 ref_generation
,
611 u64 owner
, u64 owner_offset
)
613 struct btrfs_path
*path
;
615 struct btrfs_key key
;
616 struct extent_buffer
*l
;
617 struct btrfs_extent_item
*item
;
620 WARN_ON(num_bytes
< root
->sectorsize
);
621 path
= btrfs_alloc_path();
625 key
.objectid
= bytenr
;
626 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
627 key
.offset
= num_bytes
;
628 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
637 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
638 refs
= btrfs_extent_refs(l
, item
);
639 btrfs_set_extent_refs(l
, item
, refs
+ 1);
640 btrfs_mark_buffer_dirty(path
->nodes
[0]);
642 btrfs_release_path(root
->fs_info
->extent_root
, path
);
644 ret
= btrfs_insert_extent_backref(trans
, root
->fs_info
->extent_root
,
645 path
, bytenr
, root_objectid
,
646 ref_generation
, owner
, owner_offset
);
648 finish_current_insert(trans
, root
->fs_info
->extent_root
);
649 del_pending_extents(trans
, root
->fs_info
->extent_root
);
651 btrfs_free_path(path
);
655 int btrfs_extent_post_op(struct btrfs_trans_handle
*trans
,
656 struct btrfs_root
*root
)
658 finish_current_insert(trans
, root
->fs_info
->extent_root
);
659 del_pending_extents(trans
, root
->fs_info
->extent_root
);
663 static int lookup_extent_ref(struct btrfs_trans_handle
*trans
,
664 struct btrfs_root
*root
, u64 bytenr
,
665 u64 num_bytes
, u32
*refs
)
667 struct btrfs_path
*path
;
669 struct btrfs_key key
;
670 struct extent_buffer
*l
;
671 struct btrfs_extent_item
*item
;
673 WARN_ON(num_bytes
< root
->sectorsize
);
674 path
= btrfs_alloc_path();
675 key
.objectid
= bytenr
;
676 key
.offset
= num_bytes
;
677 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
678 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
683 btrfs_print_leaf(root
, path
->nodes
[0]);
684 printk("failed to find block number %llu\n",
685 (unsigned long long)bytenr
);
689 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
690 *refs
= btrfs_extent_refs(l
, item
);
692 btrfs_free_path(path
);
696 u32
btrfs_count_snapshots_in_path(struct btrfs_root
*root
,
697 struct btrfs_path
*count_path
,
700 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
701 struct btrfs_path
*path
;
704 u64 root_objectid
= root
->root_key
.objectid
;
710 struct btrfs_key key
;
711 struct btrfs_key found_key
;
712 struct extent_buffer
*l
;
713 struct btrfs_extent_item
*item
;
714 struct btrfs_extent_ref
*ref_item
;
717 path
= btrfs_alloc_path();
720 bytenr
= first_extent
;
722 bytenr
= count_path
->nodes
[level
]->start
;
725 key
.objectid
= bytenr
;
728 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
729 ret
= btrfs_search_slot(NULL
, extent_root
, &key
, path
, 0, 0);
735 btrfs_item_key_to_cpu(l
, &found_key
, path
->slots
[0]);
737 if (found_key
.objectid
!= bytenr
||
738 found_key
.type
!= BTRFS_EXTENT_ITEM_KEY
) {
742 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
743 refs
= btrfs_extent_refs(l
, item
);
745 nritems
= btrfs_header_nritems(l
);
746 if (path
->slots
[0] >= nritems
) {
747 ret
= btrfs_next_leaf(extent_root
, path
);
752 btrfs_item_key_to_cpu(l
, &found_key
, path
->slots
[0]);
753 if (found_key
.objectid
!= bytenr
)
755 if (found_key
.type
!= BTRFS_EXTENT_REF_KEY
) {
761 ref_item
= btrfs_item_ptr(l
, path
->slots
[0],
762 struct btrfs_extent_ref
);
763 found_objectid
= btrfs_ref_root(l
, ref_item
);
765 if (found_objectid
!= root_objectid
) {
772 if (cur_count
== 0) {
776 if (level
>= 0 && root
->node
== count_path
->nodes
[level
])
779 btrfs_release_path(root
, path
);
783 btrfs_free_path(path
);
786 int btrfs_inc_root_ref(struct btrfs_trans_handle
*trans
,
787 struct btrfs_root
*root
, u64 owner_objectid
)
793 struct btrfs_disk_key disk_key
;
795 level
= btrfs_header_level(root
->node
);
796 generation
= trans
->transid
;
797 nritems
= btrfs_header_nritems(root
->node
);
800 btrfs_item_key(root
->node
, &disk_key
, 0);
802 btrfs_node_key(root
->node
, &disk_key
, 0);
803 key_objectid
= btrfs_disk_key_objectid(&disk_key
);
807 return btrfs_inc_extent_ref(trans
, root
, root
->node
->start
,
808 root
->node
->len
, owner_objectid
,
809 generation
, level
, key_objectid
);
812 int btrfs_inc_ref(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
813 struct extent_buffer
*buf
)
817 struct btrfs_key key
;
818 struct btrfs_file_extent_item
*fi
;
827 level
= btrfs_header_level(buf
);
828 nritems
= btrfs_header_nritems(buf
);
829 for (i
= 0; i
< nritems
; i
++) {
832 btrfs_item_key_to_cpu(buf
, &key
, i
);
833 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
835 fi
= btrfs_item_ptr(buf
, i
,
836 struct btrfs_file_extent_item
);
837 if (btrfs_file_extent_type(buf
, fi
) ==
838 BTRFS_FILE_EXTENT_INLINE
)
840 disk_bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
841 if (disk_bytenr
== 0)
843 ret
= btrfs_inc_extent_ref(trans
, root
, disk_bytenr
,
844 btrfs_file_extent_disk_num_bytes(buf
, fi
),
845 root
->root_key
.objectid
, trans
->transid
,
846 key
.objectid
, key
.offset
);
852 bytenr
= btrfs_node_blockptr(buf
, i
);
853 btrfs_node_key_to_cpu(buf
, &key
, i
);
854 ret
= btrfs_inc_extent_ref(trans
, root
, bytenr
,
855 btrfs_level_size(root
, level
- 1),
856 root
->root_key
.objectid
,
858 level
- 1, key
.objectid
);
869 for (i
=0; i
< faili
; i
++) {
872 btrfs_item_key_to_cpu(buf
, &key
, i
);
873 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
875 fi
= btrfs_item_ptr(buf
, i
,
876 struct btrfs_file_extent_item
);
877 if (btrfs_file_extent_type(buf
, fi
) ==
878 BTRFS_FILE_EXTENT_INLINE
)
880 disk_bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
881 if (disk_bytenr
== 0)
883 err
= btrfs_free_extent(trans
, root
, disk_bytenr
,
884 btrfs_file_extent_disk_num_bytes(buf
,
888 bytenr
= btrfs_node_blockptr(buf
, i
);
889 err
= btrfs_free_extent(trans
, root
, bytenr
,
890 btrfs_level_size(root
, level
- 1), 0);
898 static int write_one_cache_group(struct btrfs_trans_handle
*trans
,
899 struct btrfs_root
*root
,
900 struct btrfs_path
*path
,
901 struct btrfs_block_group_cache
*cache
)
905 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
907 struct extent_buffer
*leaf
;
909 ret
= btrfs_search_slot(trans
, extent_root
, &cache
->key
, path
, 0, 1);
914 leaf
= path
->nodes
[0];
915 bi
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
916 write_extent_buffer(leaf
, &cache
->item
, bi
, sizeof(cache
->item
));
917 btrfs_mark_buffer_dirty(leaf
);
918 btrfs_release_path(extent_root
, path
);
920 finish_current_insert(trans
, extent_root
);
921 pending_ret
= del_pending_extents(trans
, extent_root
);
930 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle
*trans
,
931 struct btrfs_root
*root
)
933 struct extent_io_tree
*block_group_cache
;
934 struct btrfs_block_group_cache
*cache
;
938 struct btrfs_path
*path
;
944 block_group_cache
= &root
->fs_info
->block_group_cache
;
945 path
= btrfs_alloc_path();
950 ret
= find_first_extent_bit(block_group_cache
, last
,
951 &start
, &end
, BLOCK_GROUP_DIRTY
);
956 ret
= get_state_private(block_group_cache
, start
, &ptr
);
960 cache
= (struct btrfs_block_group_cache
*)(unsigned long)ptr
;
961 err
= write_one_cache_group(trans
, root
,
964 * if we fail to write the cache group, we want
965 * to keep it marked dirty in hopes that a later
972 clear_extent_bits(block_group_cache
, start
, end
,
973 BLOCK_GROUP_DIRTY
, GFP_NOFS
);
975 btrfs_free_path(path
);
979 static struct btrfs_space_info
*__find_space_info(struct btrfs_fs_info
*info
,
982 struct list_head
*head
= &info
->space_info
;
983 struct list_head
*cur
;
984 struct btrfs_space_info
*found
;
985 list_for_each(cur
, head
) {
986 found
= list_entry(cur
, struct btrfs_space_info
, list
);
987 if (found
->flags
== flags
)
994 static int update_space_info(struct btrfs_fs_info
*info
, u64 flags
,
995 u64 total_bytes
, u64 bytes_used
,
996 struct btrfs_space_info
**space_info
)
998 struct btrfs_space_info
*found
;
1000 found
= __find_space_info(info
, flags
);
1002 found
->total_bytes
+= total_bytes
;
1003 found
->bytes_used
+= bytes_used
;
1004 WARN_ON(found
->total_bytes
< found
->bytes_used
);
1005 *space_info
= found
;
1008 found
= kmalloc(sizeof(*found
), GFP_NOFS
);
1012 list_add(&found
->list
, &info
->space_info
);
1013 found
->flags
= flags
;
1014 found
->total_bytes
= total_bytes
;
1015 found
->bytes_used
= bytes_used
;
1016 found
->bytes_pinned
= 0;
1018 *space_info
= found
;
1023 static int do_chunk_alloc(struct btrfs_trans_handle
*trans
,
1024 struct btrfs_root
*extent_root
, u64 alloc_bytes
,
1027 struct btrfs_space_info
*space_info
;
1033 space_info
= __find_space_info(extent_root
->fs_info
, flags
);
1035 ret
= update_space_info(extent_root
->fs_info
, flags
,
1039 BUG_ON(!space_info
);
1041 if (space_info
->full
)
1044 thresh
= div_factor(space_info
->total_bytes
, 7);
1045 if ((space_info
->bytes_used
+ space_info
->bytes_pinned
+ alloc_bytes
) <
1049 ret
= btrfs_alloc_chunk(trans
, extent_root
, &start
, &num_bytes
, flags
);
1050 if (ret
== -ENOSPC
) {
1051 printk("space info full %llu\n", (unsigned long long)flags
);
1052 space_info
->full
= 1;
1058 ret
= btrfs_make_block_group(trans
, extent_root
, 0, flags
,
1059 extent_root
->fs_info
->chunk_root
->root_key
.objectid
,
1065 static int update_block_group(struct btrfs_trans_handle
*trans
,
1066 struct btrfs_root
*root
,
1067 u64 bytenr
, u64 num_bytes
, int alloc
,
1070 struct btrfs_block_group_cache
*cache
;
1071 struct btrfs_fs_info
*info
= root
->fs_info
;
1072 u64 total
= num_bytes
;
1079 cache
= btrfs_lookup_block_group(info
, bytenr
);
1083 byte_in_group
= bytenr
- cache
->key
.objectid
;
1084 WARN_ON(byte_in_group
> cache
->key
.offset
);
1085 start
= cache
->key
.objectid
;
1086 end
= start
+ cache
->key
.offset
- 1;
1087 set_extent_bits(&info
->block_group_cache
, start
, end
,
1088 BLOCK_GROUP_DIRTY
, GFP_NOFS
);
1090 old_val
= btrfs_block_group_used(&cache
->item
);
1091 num_bytes
= min(total
, cache
->key
.offset
- byte_in_group
);
1093 old_val
+= num_bytes
;
1094 cache
->space_info
->bytes_used
+= num_bytes
;
1096 old_val
-= num_bytes
;
1097 cache
->space_info
->bytes_used
-= num_bytes
;
1099 set_extent_dirty(&info
->free_space_cache
,
1100 bytenr
, bytenr
+ num_bytes
- 1,
1104 btrfs_set_block_group_used(&cache
->item
, old_val
);
1106 bytenr
+= num_bytes
;
1111 static int update_pinned_extents(struct btrfs_root
*root
,
1112 u64 bytenr
, u64 num
, int pin
)
1115 struct btrfs_block_group_cache
*cache
;
1116 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1119 set_extent_dirty(&fs_info
->pinned_extents
,
1120 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
1122 clear_extent_dirty(&fs_info
->pinned_extents
,
1123 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
1126 cache
= btrfs_lookup_block_group(fs_info
, bytenr
);
1128 len
= min(num
, cache
->key
.offset
-
1129 (bytenr
- cache
->key
.objectid
));
1131 cache
->pinned
+= len
;
1132 cache
->space_info
->bytes_pinned
+= len
;
1133 fs_info
->total_pinned
+= len
;
1135 cache
->pinned
-= len
;
1136 cache
->space_info
->bytes_pinned
-= len
;
1137 fs_info
->total_pinned
-= len
;
1145 int btrfs_copy_pinned(struct btrfs_root
*root
, struct extent_io_tree
*copy
)
1150 struct extent_io_tree
*pinned_extents
= &root
->fs_info
->pinned_extents
;
1154 ret
= find_first_extent_bit(pinned_extents
, last
,
1155 &start
, &end
, EXTENT_DIRTY
);
1158 set_extent_dirty(copy
, start
, end
, GFP_NOFS
);
1164 int btrfs_finish_extent_commit(struct btrfs_trans_handle
*trans
,
1165 struct btrfs_root
*root
,
1166 struct extent_io_tree
*unpin
)
1171 struct extent_io_tree
*free_space_cache
;
1172 free_space_cache
= &root
->fs_info
->free_space_cache
;
1175 ret
= find_first_extent_bit(unpin
, 0, &start
, &end
,
1179 update_pinned_extents(root
, start
, end
+ 1 - start
, 0);
1180 clear_extent_dirty(unpin
, start
, end
, GFP_NOFS
);
1181 set_extent_dirty(free_space_cache
, start
, end
, GFP_NOFS
);
1186 static int finish_current_insert(struct btrfs_trans_handle
*trans
,
1187 struct btrfs_root
*extent_root
)
1191 struct btrfs_fs_info
*info
= extent_root
->fs_info
;
1192 struct extent_buffer
*eb
;
1193 struct btrfs_path
*path
;
1194 struct btrfs_key ins
;
1195 struct btrfs_disk_key first
;
1196 struct btrfs_extent_item extent_item
;
1201 btrfs_set_stack_extent_refs(&extent_item
, 1);
1202 btrfs_set_key_type(&ins
, BTRFS_EXTENT_ITEM_KEY
);
1203 path
= btrfs_alloc_path();
1206 ret
= find_first_extent_bit(&info
->extent_ins
, 0, &start
,
1207 &end
, EXTENT_LOCKED
);
1211 ins
.objectid
= start
;
1212 ins
.offset
= end
+ 1 - start
;
1213 err
= btrfs_insert_item(trans
, extent_root
, &ins
,
1214 &extent_item
, sizeof(extent_item
));
1215 clear_extent_bits(&info
->extent_ins
, start
, end
, EXTENT_LOCKED
,
1217 eb
= read_tree_block(extent_root
, ins
.objectid
, ins
.offset
);
1218 level
= btrfs_header_level(eb
);
1220 btrfs_item_key(eb
, &first
, 0);
1222 btrfs_node_key(eb
, &first
, 0);
1224 err
= btrfs_insert_extent_backref(trans
, extent_root
, path
,
1225 start
, extent_root
->root_key
.objectid
,
1227 btrfs_disk_key_objectid(&first
));
1229 free_extent_buffer(eb
);
1231 btrfs_free_path(path
);
1235 static int pin_down_bytes(struct btrfs_root
*root
, u64 bytenr
, u32 num_bytes
,
1239 struct extent_buffer
*buf
;
1242 buf
= btrfs_find_tree_block(root
, bytenr
, num_bytes
);
1244 if (btrfs_buffer_uptodate(buf
)) {
1246 root
->fs_info
->running_transaction
->transid
;
1247 if (btrfs_header_generation(buf
) == transid
) {
1248 free_extent_buffer(buf
);
1252 free_extent_buffer(buf
);
1254 update_pinned_extents(root
, bytenr
, num_bytes
, 1);
1256 set_extent_bits(&root
->fs_info
->pending_del
,
1257 bytenr
, bytenr
+ num_bytes
- 1,
1258 EXTENT_LOCKED
, GFP_NOFS
);
1265 * remove an extent from the root, returns 0 on success
1267 static int __free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
1268 *root
, u64 bytenr
, u64 num_bytes
,
1269 u64 root_objectid
, u64 ref_generation
,
1270 u64 owner_objectid
, u64 owner_offset
, int pin
,
1273 struct btrfs_path
*path
;
1274 struct btrfs_key key
;
1275 struct btrfs_fs_info
*info
= root
->fs_info
;
1276 struct btrfs_extent_ops
*ops
= info
->extent_ops
;
1277 struct btrfs_root
*extent_root
= info
->extent_root
;
1278 struct extent_buffer
*leaf
;
1280 int extent_slot
= 0;
1281 int found_extent
= 0;
1283 struct btrfs_extent_item
*ei
;
1286 key
.objectid
= bytenr
;
1287 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
1288 key
.offset
= num_bytes
;
1290 path
= btrfs_alloc_path();
1294 ret
= lookup_extent_backref(trans
, extent_root
, path
,
1295 bytenr
, root_objectid
,
1297 owner_objectid
, owner_offset
, 1);
1299 struct btrfs_key found_key
;
1300 extent_slot
= path
->slots
[0];
1301 while(extent_slot
> 0) {
1303 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
1305 if (found_key
.objectid
!= bytenr
)
1307 if (found_key
.type
== BTRFS_EXTENT_ITEM_KEY
&&
1308 found_key
.offset
== num_bytes
) {
1312 if (path
->slots
[0] - extent_slot
> 5)
1316 ret
= btrfs_del_item(trans
, extent_root
, path
);
1318 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
1320 printk("Unable to find ref byte nr %llu root %llu "
1321 " gen %llu owner %llu offset %llu\n",
1322 (unsigned long long)bytenr
,
1323 (unsigned long long)root_objectid
,
1324 (unsigned long long)ref_generation
,
1325 (unsigned long long)owner_objectid
,
1326 (unsigned long long)owner_offset
);
1328 if (!found_extent
) {
1329 btrfs_release_path(extent_root
, path
);
1330 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
, -1, 1);
1334 extent_slot
= path
->slots
[0];
1337 leaf
= path
->nodes
[0];
1338 ei
= btrfs_item_ptr(leaf
, extent_slot
,
1339 struct btrfs_extent_item
);
1340 refs
= btrfs_extent_refs(leaf
, ei
);
1343 btrfs_set_extent_refs(leaf
, ei
, refs
);
1345 btrfs_mark_buffer_dirty(leaf
);
1347 if (refs
== 0 && found_extent
&& path
->slots
[0] == extent_slot
+ 1) {
1348 /* if the back ref and the extent are next to each other
1349 * they get deleted below in one shot
1351 path
->slots
[0] = extent_slot
;
1353 } else if (found_extent
) {
1354 /* otherwise delete the extent back ref */
1355 ret
= btrfs_del_item(trans
, extent_root
, path
);
1357 /* if refs are 0, we need to setup the path for deletion */
1359 btrfs_release_path(extent_root
, path
);
1360 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
,
1373 ret
= pin_down_bytes(root
, bytenr
, num_bytes
, 0);
1379 /* block accounting for super block */
1380 super_used
= btrfs_super_bytes_used(&info
->super_copy
);
1381 btrfs_set_super_bytes_used(&info
->super_copy
,
1382 super_used
- num_bytes
);
1384 /* block accounting for root item */
1385 root_used
= btrfs_root_used(&root
->root_item
);
1386 btrfs_set_root_used(&root
->root_item
,
1387 root_used
- num_bytes
);
1388 ret
= btrfs_del_items(trans
, extent_root
, path
, path
->slots
[0],
1393 if (ops
&& ops
->free_extent
)
1394 ops
->free_extent(root
, bytenr
, num_bytes
);
1396 ret
= update_block_group(trans
, root
, bytenr
, num_bytes
, 0,
1400 btrfs_free_path(path
);
1401 finish_current_insert(trans
, extent_root
);
1406 * find all the blocks marked as pending in the radix tree and remove
1407 * them from the extent map
1409 static int del_pending_extents(struct btrfs_trans_handle
*trans
, struct
1410 btrfs_root
*extent_root
)
1416 struct extent_io_tree
*pending_del
;
1417 struct extent_io_tree
*pinned_extents
;
1419 pending_del
= &extent_root
->fs_info
->pending_del
;
1420 pinned_extents
= &extent_root
->fs_info
->pinned_extents
;
1423 ret
= find_first_extent_bit(pending_del
, 0, &start
, &end
,
1427 update_pinned_extents(extent_root
, start
, end
+ 1 - start
, 1);
1428 clear_extent_bits(pending_del
, start
, end
, EXTENT_LOCKED
,
1430 ret
= __free_extent(trans
, extent_root
,
1431 start
, end
+ 1 - start
,
1432 extent_root
->root_key
.objectid
,
1441 * remove an extent from the root, returns 0 on success
1443 int btrfs_free_extent(struct btrfs_trans_handle
*trans
, struct btrfs_root
1444 *root
, u64 bytenr
, u64 num_bytes
,
1445 u64 root_objectid
, u64 ref_generation
,
1446 u64 owner_objectid
, u64 owner_offset
, int pin
)
1448 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
1452 WARN_ON(num_bytes
< root
->sectorsize
);
1453 if (!root
->ref_cows
)
1456 if (root
== extent_root
) {
1457 pin_down_bytes(root
, bytenr
, num_bytes
, 1);
1460 ret
= __free_extent(trans
, root
, bytenr
, num_bytes
, root_objectid
,
1461 ref_generation
, owner_objectid
, owner_offset
,
1463 pending_ret
= del_pending_extents(trans
, root
->fs_info
->extent_root
);
1464 return ret
? ret
: pending_ret
;
1467 static u64
stripe_align(struct btrfs_root
*root
, u64 val
)
1469 u64 mask
= ((u64
)root
->stripesize
- 1);
1470 u64 ret
= (val
+ mask
) & ~mask
;
1475 * walks the btree of allocated extents and find a hole of a given size.
1476 * The key ins is changed to record the hole:
1477 * ins->objectid == block start
1478 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1479 * ins->offset == number of blocks
1480 * Any available blocks before search_start are skipped.
1482 static int noinline
find_free_extent(struct btrfs_trans_handle
*trans
,
1483 struct btrfs_root
*orig_root
,
1484 u64 num_bytes
, u64 empty_size
,
1485 u64 search_start
, u64 search_end
,
1486 u64 hint_byte
, struct btrfs_key
*ins
,
1487 u64 exclude_start
, u64 exclude_nr
,
1491 u64 orig_search_start
= search_start
;
1492 struct btrfs_root
* root
= orig_root
->fs_info
->extent_root
;
1493 struct btrfs_fs_info
*info
= root
->fs_info
;
1494 u64 total_needed
= num_bytes
;
1495 struct btrfs_block_group_cache
*block_group
;
1499 WARN_ON(num_bytes
< root
->sectorsize
);
1500 btrfs_set_key_type(ins
, BTRFS_EXTENT_ITEM_KEY
);
1502 if (search_end
== (u64
)-1)
1503 search_end
= btrfs_super_total_bytes(&info
->super_copy
);
1506 block_group
= btrfs_lookup_block_group(info
, hint_byte
);
1508 hint_byte
= search_start
;
1509 block_group
= btrfs_find_block_group(root
, block_group
,
1510 hint_byte
, data
, 1);
1512 block_group
= btrfs_find_block_group(root
,
1514 search_start
, data
, 1);
1517 total_needed
+= empty_size
;
1521 block_group
= btrfs_lookup_block_group(info
, search_start
);
1523 block_group
= btrfs_lookup_block_group(info
,
1526 ret
= find_search_start(root
, &block_group
, &search_start
,
1527 total_needed
, data
);
1531 search_start
= stripe_align(root
, search_start
);
1532 ins
->objectid
= search_start
;
1533 ins
->offset
= num_bytes
;
1535 if (ins
->objectid
+ num_bytes
>= search_end
)
1538 if (ins
->objectid
+ num_bytes
>
1539 block_group
->key
.objectid
+ block_group
->key
.offset
) {
1540 search_start
= block_group
->key
.objectid
+
1541 block_group
->key
.offset
;
1545 if (test_range_bit(&info
->extent_ins
, ins
->objectid
,
1546 ins
->objectid
+ num_bytes
-1, EXTENT_LOCKED
, 0)) {
1547 search_start
= ins
->objectid
+ num_bytes
;
1551 if (test_range_bit(&info
->pinned_extents
, ins
->objectid
,
1552 ins
->objectid
+ num_bytes
-1, EXTENT_DIRTY
, 0)) {
1553 search_start
= ins
->objectid
+ num_bytes
;
1557 if (exclude_nr
> 0 && (ins
->objectid
+ num_bytes
> exclude_start
&&
1558 ins
->objectid
< exclude_start
+ exclude_nr
)) {
1559 search_start
= exclude_start
+ exclude_nr
;
1563 if (!(data
& BTRFS_BLOCK_GROUP_DATA
)) {
1564 block_group
= btrfs_lookup_block_group(info
, ins
->objectid
);
1566 trans
->block_group
= block_group
;
1568 ins
->offset
= num_bytes
;
1572 if (search_start
+ num_bytes
>= search_end
) {
1574 search_start
= orig_search_start
;
1581 total_needed
-= empty_size
;
1586 block_group
= btrfs_lookup_block_group(info
, search_start
);
1588 block_group
= btrfs_find_block_group(root
, block_group
,
1589 search_start
, data
, 0);
1596 * finds a free extent and does all the dirty work required for allocation
1597 * returns the key for the extent through ins, and a tree buffer for
1598 * the first block of the extent through buf.
1600 * returns 0 if everything worked, non-zero otherwise.
1602 int btrfs_alloc_extent(struct btrfs_trans_handle
*trans
,
1603 struct btrfs_root
*root
,
1604 u64 num_bytes
, u64 root_objectid
, u64 ref_generation
,
1605 u64 owner
, u64 owner_offset
,
1606 u64 empty_size
, u64 hint_byte
,
1607 u64 search_end
, struct btrfs_key
*ins
, int data
)
1611 u64 super_used
, root_used
;
1612 u64 search_start
= 0;
1613 struct btrfs_fs_info
*info
= root
->fs_info
;
1614 struct btrfs_extent_ops
*ops
= info
->extent_ops
;
1616 struct btrfs_root
*extent_root
= info
->extent_root
;
1617 struct btrfs_path
*path
;
1618 struct btrfs_extent_item
*extent_item
;
1619 struct btrfs_extent_ref
*ref
;
1620 struct btrfs_key keys
[2];
1623 data
= BTRFS_BLOCK_GROUP_DATA
;
1624 } else if (root
== root
->fs_info
->chunk_root
||
1625 info
->force_system_allocs
) {
1626 data
= BTRFS_BLOCK_GROUP_SYSTEM
;
1628 data
= BTRFS_BLOCK_GROUP_METADATA
;
1631 if (root
->ref_cows
) {
1632 if (!(data
& BTRFS_BLOCK_GROUP_METADATA
)) {
1633 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
1635 BTRFS_BLOCK_GROUP_METADATA
);
1638 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
1639 num_bytes
+ 2 * 1024 * 1024, data
);
1643 WARN_ON(num_bytes
< root
->sectorsize
);
1644 if (ops
&& ops
->alloc_extent
) {
1645 ret
= ops
->alloc_extent(root
, num_bytes
, hint_byte
, ins
);
1647 ret
= find_free_extent(trans
, root
, num_bytes
, empty_size
,
1648 search_start
, search_end
, hint_byte
,
1649 ins
, trans
->alloc_exclude_start
,
1650 trans
->alloc_exclude_nr
, data
);
1656 /* block accounting for super block */
1657 super_used
= btrfs_super_bytes_used(&info
->super_copy
);
1658 btrfs_set_super_bytes_used(&info
->super_copy
, super_used
+ num_bytes
);
1660 /* block accounting for root item */
1661 root_used
= btrfs_root_used(&root
->root_item
);
1662 btrfs_set_root_used(&root
->root_item
, root_used
+ num_bytes
);
1664 clear_extent_dirty(&root
->fs_info
->free_space_cache
,
1665 ins
->objectid
, ins
->objectid
+ ins
->offset
- 1,
1668 if (root
== extent_root
) {
1669 set_extent_bits(&root
->fs_info
->extent_ins
, ins
->objectid
,
1670 ins
->objectid
+ ins
->offset
- 1,
1671 EXTENT_LOCKED
, GFP_NOFS
);
1675 WARN_ON(trans
->alloc_exclude_nr
);
1676 trans
->alloc_exclude_start
= ins
->objectid
;
1677 trans
->alloc_exclude_nr
= ins
->offset
;
1679 memcpy(&keys
[0], ins
, sizeof(*ins
));
1680 keys
[1].offset
= hash_extent_ref(root_objectid
, ref_generation
,
1681 owner
, owner_offset
);
1682 keys
[1].objectid
= ins
->objectid
;
1683 keys
[1].type
= BTRFS_EXTENT_REF_KEY
;
1684 sizes
[0] = sizeof(*extent_item
);
1685 sizes
[1] = sizeof(*ref
);
1687 path
= btrfs_alloc_path();
1690 ret
= btrfs_insert_empty_items(trans
, extent_root
, path
, keys
,
1694 extent_item
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1695 struct btrfs_extent_item
);
1696 btrfs_set_extent_refs(path
->nodes
[0], extent_item
, 1);
1697 ref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0] + 1,
1698 struct btrfs_extent_ref
);
1700 btrfs_set_ref_root(path
->nodes
[0], ref
, root_objectid
);
1701 btrfs_set_ref_generation(path
->nodes
[0], ref
, ref_generation
);
1702 btrfs_set_ref_objectid(path
->nodes
[0], ref
, owner
);
1703 btrfs_set_ref_offset(path
->nodes
[0], ref
, owner_offset
);
1705 btrfs_mark_buffer_dirty(path
->nodes
[0]);
1707 trans
->alloc_exclude_start
= 0;
1708 trans
->alloc_exclude_nr
= 0;
1709 btrfs_free_path(path
);
1710 finish_current_insert(trans
, extent_root
);
1711 pending_ret
= del_pending_extents(trans
, extent_root
);
1721 ret
= update_block_group(trans
, root
, ins
->objectid
, ins
->offset
, 1, 0);
1723 printk("update block group failed for %llu %llu\n",
1724 (unsigned long long)ins
->objectid
,
1725 (unsigned long long)ins
->offset
);
1732 * helper function to allocate a block for a given tree
1733 * returns the tree buffer or NULL.
1735 struct extent_buffer
*btrfs_alloc_free_block(struct btrfs_trans_handle
*trans
,
1736 struct btrfs_root
*root
,
1738 u64 root_objectid
, u64 hint
,
1744 ref_generation
= trans
->transid
;
1749 return __btrfs_alloc_free_block(trans
, root
, blocksize
, root_objectid
,
1750 ref_generation
, 0, 0, hint
, empty_size
);
1754 * helper function to allocate a block for a given tree
1755 * returns the tree buffer or NULL.
1757 struct extent_buffer
*__btrfs_alloc_free_block(struct btrfs_trans_handle
*trans
,
1758 struct btrfs_root
*root
,
1767 struct btrfs_key ins
;
1769 struct extent_buffer
*buf
;
1771 ret
= btrfs_alloc_extent(trans
, root
, blocksize
,
1772 root_objectid
, ref_generation
,
1773 level
, first_objectid
, empty_size
, hint
,
1777 return ERR_PTR(ret
);
1779 buf
= btrfs_find_create_tree_block(root
, ins
.objectid
, blocksize
);
1781 btrfs_free_extent(trans
, root
, ins
.objectid
, blocksize
,
1782 root
->root_key
.objectid
, ref_generation
,
1785 return ERR_PTR(-ENOMEM
);
1787 btrfs_set_buffer_uptodate(buf
);
1788 trans
->blocks_used
++;
1792 static int noinline
drop_leaf_ref(struct btrfs_trans_handle
*trans
,
1793 struct btrfs_root
*root
,
1794 struct extent_buffer
*leaf
)
1797 u64 leaf_generation
;
1798 struct btrfs_key key
;
1799 struct btrfs_file_extent_item
*fi
;
1804 BUG_ON(!btrfs_is_leaf(leaf
));
1805 nritems
= btrfs_header_nritems(leaf
);
1806 leaf_owner
= btrfs_header_owner(leaf
);
1807 leaf_generation
= btrfs_header_generation(leaf
);
1809 for (i
= 0; i
< nritems
; i
++) {
1812 btrfs_item_key_to_cpu(leaf
, &key
, i
);
1813 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
1815 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
1816 if (btrfs_file_extent_type(leaf
, fi
) ==
1817 BTRFS_FILE_EXTENT_INLINE
)
1820 * FIXME make sure to insert a trans record that
1821 * repeats the snapshot del on crash
1823 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
1824 if (disk_bytenr
== 0)
1826 ret
= btrfs_free_extent(trans
, root
, disk_bytenr
,
1827 btrfs_file_extent_disk_num_bytes(leaf
, fi
),
1828 leaf_owner
, leaf_generation
,
1829 key
.objectid
, key
.offset
, 0);
1835 static void noinline
reada_walk_down(struct btrfs_root
*root
,
1836 struct extent_buffer
*node
,
1849 nritems
= btrfs_header_nritems(node
);
1850 level
= btrfs_header_level(node
);
1854 for (i
= slot
; i
< nritems
&& skipped
< 32; i
++) {
1855 bytenr
= btrfs_node_blockptr(node
, i
);
1856 if (last
&& ((bytenr
> last
&& bytenr
- last
> 32 * 1024) ||
1857 (last
> bytenr
&& last
- bytenr
> 32 * 1024))) {
1861 blocksize
= btrfs_level_size(root
, level
- 1);
1863 ret
= lookup_extent_ref(NULL
, root
, bytenr
,
1871 mutex_unlock(&root
->fs_info
->fs_mutex
);
1872 ret
= readahead_tree_block(root
, bytenr
, blocksize
);
1873 last
= bytenr
+ blocksize
;
1875 mutex_lock(&root
->fs_info
->fs_mutex
);
1882 * helper function for drop_snapshot, this walks down the tree dropping ref
1883 * counts as it goes.
1885 static int noinline
walk_down_tree(struct btrfs_trans_handle
*trans
,
1886 struct btrfs_root
*root
,
1887 struct btrfs_path
*path
, int *level
)
1892 struct extent_buffer
*next
;
1893 struct extent_buffer
*cur
;
1894 struct extent_buffer
*parent
;
1899 WARN_ON(*level
< 0);
1900 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
1901 ret
= lookup_extent_ref(trans
, root
,
1902 path
->nodes
[*level
]->start
,
1903 path
->nodes
[*level
]->len
, &refs
);
1909 * walk down to the last node level and free all the leaves
1911 while(*level
>= 0) {
1912 WARN_ON(*level
< 0);
1913 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
1914 cur
= path
->nodes
[*level
];
1916 if (btrfs_header_level(cur
) != *level
)
1919 if (path
->slots
[*level
] >=
1920 btrfs_header_nritems(cur
))
1923 ret
= drop_leaf_ref(trans
, root
, cur
);
1927 bytenr
= btrfs_node_blockptr(cur
, path
->slots
[*level
]);
1928 blocksize
= btrfs_level_size(root
, *level
- 1);
1929 ret
= lookup_extent_ref(trans
, root
, bytenr
, blocksize
, &refs
);
1932 parent
= path
->nodes
[*level
];
1933 root_owner
= btrfs_header_owner(parent
);
1934 root_gen
= btrfs_header_generation(parent
);
1935 path
->slots
[*level
]++;
1936 ret
= btrfs_free_extent(trans
, root
, bytenr
,
1937 blocksize
, root_owner
,
1942 next
= btrfs_find_tree_block(root
, bytenr
, blocksize
);
1943 if (!next
|| !btrfs_buffer_uptodate(next
)) {
1944 free_extent_buffer(next
);
1945 reada_walk_down(root
, cur
, path
->slots
[*level
]);
1946 mutex_unlock(&root
->fs_info
->fs_mutex
);
1947 next
= read_tree_block(root
, bytenr
, blocksize
);
1948 mutex_lock(&root
->fs_info
->fs_mutex
);
1950 /* we dropped the lock, check one more time */
1951 ret
= lookup_extent_ref(trans
, root
, bytenr
,
1955 parent
= path
->nodes
[*level
];
1956 root_owner
= btrfs_header_owner(parent
);
1957 root_gen
= btrfs_header_generation(parent
);
1959 path
->slots
[*level
]++;
1960 free_extent_buffer(next
);
1961 ret
= btrfs_free_extent(trans
, root
, bytenr
,
1969 WARN_ON(*level
<= 0);
1970 if (path
->nodes
[*level
-1])
1971 free_extent_buffer(path
->nodes
[*level
-1]);
1972 path
->nodes
[*level
-1] = next
;
1973 *level
= btrfs_header_level(next
);
1974 path
->slots
[*level
] = 0;
1977 WARN_ON(*level
< 0);
1978 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
1980 if (path
->nodes
[*level
] == root
->node
) {
1981 root_owner
= root
->root_key
.objectid
;
1982 parent
= path
->nodes
[*level
];
1984 parent
= path
->nodes
[*level
+ 1];
1985 root_owner
= btrfs_header_owner(parent
);
1988 root_gen
= btrfs_header_generation(parent
);
1989 ret
= btrfs_free_extent(trans
, root
, path
->nodes
[*level
]->start
,
1990 path
->nodes
[*level
]->len
,
1991 root_owner
, root_gen
, 0, 0, 1);
1992 free_extent_buffer(path
->nodes
[*level
]);
1993 path
->nodes
[*level
] = NULL
;
2000 * helper for dropping snapshots. This walks back up the tree in the path
2001 * to find the first node higher up where we haven't yet gone through
2004 static int noinline
walk_up_tree(struct btrfs_trans_handle
*trans
,
2005 struct btrfs_root
*root
,
2006 struct btrfs_path
*path
, int *level
)
2010 struct btrfs_root_item
*root_item
= &root
->root_item
;
2015 for(i
= *level
; i
< BTRFS_MAX_LEVEL
- 1 && path
->nodes
[i
]; i
++) {
2016 slot
= path
->slots
[i
];
2017 if (slot
< btrfs_header_nritems(path
->nodes
[i
]) - 1) {
2018 struct extent_buffer
*node
;
2019 struct btrfs_disk_key disk_key
;
2020 node
= path
->nodes
[i
];
2023 WARN_ON(*level
== 0);
2024 btrfs_node_key(node
, &disk_key
, path
->slots
[i
]);
2025 memcpy(&root_item
->drop_progress
,
2026 &disk_key
, sizeof(disk_key
));
2027 root_item
->drop_level
= i
;
2030 if (path
->nodes
[*level
] == root
->node
) {
2031 root_owner
= root
->root_key
.objectid
;
2033 btrfs_header_generation(path
->nodes
[*level
]);
2035 struct extent_buffer
*node
;
2036 node
= path
->nodes
[*level
+ 1];
2037 root_owner
= btrfs_header_owner(node
);
2038 root_gen
= btrfs_header_generation(node
);
2040 ret
= btrfs_free_extent(trans
, root
,
2041 path
->nodes
[*level
]->start
,
2042 path
->nodes
[*level
]->len
,
2043 root_owner
, root_gen
, 0, 0, 1);
2045 free_extent_buffer(path
->nodes
[*level
]);
2046 path
->nodes
[*level
] = NULL
;
2054 * drop the reference count on the tree rooted at 'snap'. This traverses
2055 * the tree freeing any blocks that have a ref count of zero after being
2058 int btrfs_drop_snapshot(struct btrfs_trans_handle
*trans
, struct btrfs_root
2064 struct btrfs_path
*path
;
2067 struct btrfs_root_item
*root_item
= &root
->root_item
;
2069 path
= btrfs_alloc_path();
2072 level
= btrfs_header_level(root
->node
);
2074 if (btrfs_disk_key_objectid(&root_item
->drop_progress
) == 0) {
2075 path
->nodes
[level
] = root
->node
;
2076 extent_buffer_get(root
->node
);
2077 path
->slots
[level
] = 0;
2079 struct btrfs_key key
;
2080 struct btrfs_disk_key found_key
;
2081 struct extent_buffer
*node
;
2083 btrfs_disk_key_to_cpu(&key
, &root_item
->drop_progress
);
2084 level
= root_item
->drop_level
;
2085 path
->lowest_level
= level
;
2086 wret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2091 node
= path
->nodes
[level
];
2092 btrfs_node_key(node
, &found_key
, path
->slots
[level
]);
2093 WARN_ON(memcmp(&found_key
, &root_item
->drop_progress
,
2094 sizeof(found_key
)));
2097 wret
= walk_down_tree(trans
, root
, path
, &level
);
2103 wret
= walk_up_tree(trans
, root
, path
, &level
);
2113 for (i
= 0; i
<= orig_level
; i
++) {
2114 if (path
->nodes
[i
]) {
2115 free_extent_buffer(path
->nodes
[i
]);
2116 path
->nodes
[i
] = NULL
;
2120 btrfs_free_path(path
);
2124 int btrfs_free_block_groups(struct btrfs_fs_info
*info
)
2131 ret
= find_first_extent_bit(&info
->block_group_cache
, 0,
2132 &start
, &end
, (unsigned int)-1);
2135 ret
= get_state_private(&info
->block_group_cache
, start
, &ptr
);
2137 kfree((void *)(unsigned long)ptr
);
2138 clear_extent_bits(&info
->block_group_cache
, start
,
2139 end
, (unsigned int)-1, GFP_NOFS
);
2142 ret
= find_first_extent_bit(&info
->free_space_cache
, 0,
2143 &start
, &end
, EXTENT_DIRTY
);
2146 clear_extent_dirty(&info
->free_space_cache
, start
,
2152 int find_first_block_group(struct btrfs_root
*root
, struct btrfs_path
*path
,
2153 struct btrfs_key
*key
)
2156 struct btrfs_key found_key
;
2157 struct extent_buffer
*leaf
;
2160 ret
= btrfs_search_slot(NULL
, root
, key
, path
, 0, 0);
2164 slot
= path
->slots
[0];
2165 leaf
= path
->nodes
[0];
2166 if (slot
>= btrfs_header_nritems(leaf
)) {
2167 ret
= btrfs_next_leaf(root
, path
);
2174 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
2176 if (found_key
.objectid
>= key
->objectid
&&
2177 found_key
.type
== BTRFS_BLOCK_GROUP_ITEM_KEY
)
2186 int btrfs_read_block_groups(struct btrfs_root
*root
)
2188 struct btrfs_path
*path
;
2191 struct btrfs_block_group_cache
*cache
;
2192 struct btrfs_fs_info
*info
= root
->fs_info
;
2193 struct btrfs_space_info
*space_info
;
2194 struct extent_io_tree
*block_group_cache
;
2195 struct btrfs_key key
;
2196 struct btrfs_key found_key
;
2197 struct extent_buffer
*leaf
;
2199 block_group_cache
= &info
->block_group_cache
;
2201 root
= info
->extent_root
;
2204 btrfs_set_key_type(&key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
2205 path
= btrfs_alloc_path();
2210 ret
= find_first_block_group(root
, path
, &key
);
2218 leaf
= path
->nodes
[0];
2219 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
2220 cache
= kmalloc(sizeof(*cache
), GFP_NOFS
);
2226 read_extent_buffer(leaf
, &cache
->item
,
2227 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
2228 sizeof(cache
->item
));
2229 memcpy(&cache
->key
, &found_key
, sizeof(found_key
));
2232 key
.objectid
= found_key
.objectid
+ found_key
.offset
;
2233 btrfs_release_path(root
, path
);
2234 cache
->flags
= btrfs_block_group_flags(&cache
->item
);
2236 if (cache
->flags
& BTRFS_BLOCK_GROUP_DATA
) {
2237 bit
= BLOCK_GROUP_DATA
;
2238 } else if (cache
->flags
& BTRFS_BLOCK_GROUP_SYSTEM
) {
2239 bit
= BLOCK_GROUP_SYSTEM
;
2240 } else if (cache
->flags
& BTRFS_BLOCK_GROUP_METADATA
) {
2241 bit
= BLOCK_GROUP_METADATA
;
2244 ret
= update_space_info(info
, cache
->flags
, found_key
.offset
,
2245 btrfs_block_group_used(&cache
->item
),
2248 cache
->space_info
= space_info
;
2250 /* use EXTENT_LOCKED to prevent merging */
2251 set_extent_bits(block_group_cache
, found_key
.objectid
,
2252 found_key
.objectid
+ found_key
.offset
- 1,
2253 bit
| EXTENT_LOCKED
, GFP_NOFS
);
2254 set_state_private(block_group_cache
, found_key
.objectid
,
2255 (unsigned long)cache
);
2258 btrfs_super_total_bytes(&info
->super_copy
))
2263 btrfs_free_path(path
);
2267 int btrfs_make_block_group(struct btrfs_trans_handle
*trans
,
2268 struct btrfs_root
*root
, u64 bytes_used
,
2269 u64 type
, u64 chunk_tree
, u64 chunk_objectid
,
2274 struct btrfs_root
*extent_root
;
2275 struct btrfs_block_group_cache
*cache
;
2276 struct extent_io_tree
*block_group_cache
;
2278 extent_root
= root
->fs_info
->extent_root
;
2279 block_group_cache
= &root
->fs_info
->block_group_cache
;
2281 cache
= kmalloc(sizeof(*cache
), GFP_NOFS
);
2283 cache
->key
.objectid
= chunk_objectid
;
2284 cache
->key
.offset
= size
;
2286 btrfs_set_key_type(&cache
->key
, BTRFS_BLOCK_GROUP_ITEM_KEY
);
2287 memset(&cache
->item
, 0, sizeof(cache
->item
));
2288 btrfs_set_block_group_used(&cache
->item
, bytes_used
);
2289 btrfs_set_block_group_chunk_tree(&cache
->item
, chunk_tree
);
2290 btrfs_set_block_group_chunk_objectid(&cache
->item
, chunk_objectid
);
2291 cache
->flags
= type
;
2292 btrfs_set_block_group_flags(&cache
->item
, type
);
2294 ret
= update_space_info(root
->fs_info
, cache
->flags
, size
, bytes_used
,
2295 &cache
->space_info
);
2298 if (type
& BTRFS_BLOCK_GROUP_DATA
) {
2299 bit
= BLOCK_GROUP_DATA
;
2300 } else if (type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
2301 bit
= BLOCK_GROUP_SYSTEM
;
2302 } else if (type
& BTRFS_BLOCK_GROUP_METADATA
) {
2303 bit
= BLOCK_GROUP_METADATA
;
2305 set_extent_bits(block_group_cache
, chunk_objectid
,
2306 chunk_objectid
+ size
- 1,
2307 bit
| EXTENT_LOCKED
, GFP_NOFS
);
2308 set_state_private(block_group_cache
, chunk_objectid
,
2309 (unsigned long)cache
);
2311 ret
= btrfs_insert_item(trans
, extent_root
, &cache
->key
, &cache
->item
,
2312 sizeof(cache
->item
));
2315 finish_current_insert(trans
, extent_root
);
2316 ret
= del_pending_extents(trans
, extent_root
);
2321 u64
btrfs_hash_extent_ref(u64 root_objectid
, u64 ref_generation
,
2322 u64 owner
, u64 owner_offset
)
2324 return hash_extent_ref(root_objectid
, ref_generation
,
2325 owner
, owner_offset
);
2328 int btrfs_update_block_group(struct btrfs_trans_handle
*trans
,
2329 struct btrfs_root
*root
,
2330 u64 bytenr
, u64 num_bytes
, int alloc
,
2333 return update_block_group(trans
, root
, bytenr
, num_bytes
,