/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include "print-tree.h"
#include "transaction.h"
#include "ref-cache.h"
#include "free-space-cache.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2
struct pending_extent_op {
	struct list_head list;
};
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root, u64 parent,
					 u64 root_objectid, u64 ref_generation,
					 u64 owner, struct btrfs_key *ins,
					 int ref_mod);
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static noinline int __btrfs_free_extent(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					u64 bytenr, u64 num_bytes, u64 parent,
					u64 root_objectid, u64 ref_generation,
					u64 owner_objectid, int pin,
					int refs_to_drop);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
		} else if (block_group->key.objectid > cache->key.objectid) {
			spin_unlock(&info->block_group_cache_lock);

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static int add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size;

	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY);

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			start = extent_end + 1;

	ret = btrfs_add_free_space(block_group, start, size);
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr, 0,
				       &logical, &nr, &stripe_len);
			btrfs_remove_free_space(cache, logical[nr],
						stripe_len);
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;

	root = root->fs_info->extent_root;

	if (block_group->cached)

	path = btrfs_alloc_path();

	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			add_new_free_space(block_group, root->fs_info, last,
					   key.objectid);

			last = key.objectid + key.offset;

	add_new_free_space(block_group, root->fs_info, last,
			   block_group->key.objectid +
			   block_group->key.offset);

	block_group->cached = 1;
	remove_sb_from_cache(root, block_group);
	btrfs_free_path(path);
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);
/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry_rcu(found, head, list)
static u64 div_factor(u64 num, int factor)
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 last = max(search_hint, search_start);

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);

	if (!full_search && factor < 10) {
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	key.objectid = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - number of references held by the parent node (always 1 for tree blocks)
 *
 * A btree leaf may hold multiple references to a file extent. In most cases,
 * these references are from the same file and the corresponding offsets
 * inside the file are close together.
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, 1)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks similar to the create case, but trans->transid will
 * be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid,
 *      number of references in the leaf)
 *
 * When a file extent is removed either during snapshot deletion or
 * file truncation, we find the corresponding back reference and check
 * the following fields:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a tree block is cow'd, new back references are added for all the
 * blocks it points to. If the tree block isn't in a reference counted root,
 * the old back references are removed. These new back references are of
 * the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a backref is being deleted, the following fields are checked:
 *
 * if backref was for a tree root:
 *     (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
 *     (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, the key
 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
 * byte of the parent extent. If an extent is a tree root, the key offset
 * is set to the key objectid.
 */
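
/*
 * Illustrative example only (the numbers are made up, not from the original
 * source): a file extent newly allocated at byte 12582912 for inode 257 in
 * subvolume 5, referenced from a leaf that starts at byte 30408704, would
 * get a backref item keyed
 *
 *     (12582912, BTRFS_EXTENT_REF_KEY, 30408704)
 *
 * whose ref fields hold (5, trans->transid, 257, 1), matching the
 * (root_key.objectid, trans->transid, inode objectid, 1) pattern described
 * above.
 */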
static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid, int del)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct extent_buffer *leaf;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	ref_objectid = btrfs_ref_objectid(leaf, ref);
	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != ref_generation ||
	    (ref_objectid != owner_objectid &&
	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		btrfs_set_ref_root(leaf, ref, ref_root);
		btrfs_set_ref_generation(leaf, ref, ref_generation);
		btrfs_set_ref_objectid(leaf, ref, owner_objectid);
		btrfs_set_ref_num_refs(leaf, ref, refs_to_add);
	} else if (ret == -EEXIST) {
		BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		if (btrfs_ref_root(leaf, ref) != ref_root ||
		    btrfs_ref_generation(leaf, ref) != ref_generation) {

		num_refs = btrfs_ref_num_refs(leaf, ref);
		BUG_ON(num_refs == 0);
		btrfs_set_ref_num_refs(leaf, ref, num_refs + refs_to_add);

		existing_owner = btrfs_ref_objectid(leaf, ref);
		if (existing_owner != owner_objectid &&
		    existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
			btrfs_set_ref_objectid(leaf, ref,
					       BTRFS_MULTIPLE_OBJECTIDS);

	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root, path);
static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int refs_to_drop)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	num_refs = btrfs_ref_num_refs(leaf, ref);
	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

		ret = btrfs_del_item(trans, root, path);
		btrfs_set_ref_num_refs(leaf, ref, num_refs);
		btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(root, path);
#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);

		struct btrfs_bio_stripe *stripe = multi->stripes;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 bytenr,
				     u64 num_bytes,
				     u64 orig_parent, u64 parent,
				     u64 orig_root, u64 ref_root,
				     u64 orig_generation, u64 ref_generation,
				     u64 owner_objectid)
{
	int pin = owner_objectid < BTRFS_FIRST_FREE_OBJECTID;

	ret = btrfs_update_delayed_ref(trans, bytenr, num_bytes,
				       orig_parent, parent, orig_root,
				       ref_root, orig_generation,
				       ref_generation, owner_objectid, pin);
int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u64 orig_parent, u64 parent,
			    u64 ref_root, u64 ref_generation,
			    u64 owner_objectid)
{
	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)

	ret = __btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
					orig_parent, parent, ref_root,
					ref_root, ref_generation,
					ref_generation, owner_objectid);
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, u64 bytenr,
				  u64 num_bytes,
				  u64 orig_parent, u64 parent,
				  u64 orig_root, u64 ref_root,
				  u64 orig_generation, u64 ref_generation,
				  u64 owner_objectid)
{
	ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent, ref_root,
				    ref_generation, owner_objectid,
				    BTRFS_ADD_DELAYED_REF, 0);
static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root, u64 bytenr,
					 u64 num_bytes, u64 parent, u64 ref_root,
					 u64 ref_generation, u64 owner_objectid,
					 int refs_to_add)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;

	path = btrfs_alloc_path();

	path->leave_spinning = 1;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	/* first find the extent item and update its reference count */
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
		btrfs_set_path_blocking(path);
		btrfs_free_path(path);

	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	if (key.objectid != bytenr) {
		btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
		printk(KERN_ERR "btrfs wanted %llu found %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)key.objectid);
	BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);

	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);

	refs = btrfs_extent_refs(l, item);
	btrfs_set_extent_refs(l, item, refs + refs_to_add);
	btrfs_unlock_up_safe(path, 1);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root->fs_info->extent_root, path);

	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent,
				    ref_root, ref_generation,
				    owner_objectid, refs_to_add);
	btrfs_free_path(path);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 ref_root, u64 ref_generation,
			 u64 owner_objectid)
{
	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)

	ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, parent,
				     0, ref_root, 0, ref_generation,
				     owner_objectid);
static int drop_delayed_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_delayed_ref_node *node)
{
	struct btrfs_delayed_ref *ref = btrfs_delayed_node_to_ref(node);

	BUG_ON(node->ref_mod == 0);
	ret = __btrfs_free_extent(trans, root, node->bytenr, node->num_bytes,
				  node->parent, ref->root, ref->generation,
				  ref->owner_objectid, ref->pin, node->ref_mod);
/* helper function to actually process a single delayed ref entry */
static noinline int run_one_delayed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_ref_node *node,
					int insert_reserved)
{
	struct btrfs_delayed_ref *ref;

	if (node->parent == (u64)-1) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		if (insert_reserved) {
			update_reserved_extents(root, node->bytenr,
						node->num_bytes, 0);
		}
		head = btrfs_delayed_node_to_head(node);
		mutex_unlock(&head->mutex);
	}

	ref = btrfs_delayed_node_to_ref(node);
	if (ref->action == BTRFS_ADD_DELAYED_REF) {
		if (insert_reserved) {
			struct btrfs_key ins;

			ins.objectid = node->bytenr;
			ins.offset = node->num_bytes;
			ins.type = BTRFS_EXTENT_ITEM_KEY;

			/* record the full extent allocation */
			ret = __btrfs_alloc_reserved_extent(trans, root,
					node->parent, ref->root,
					ref->generation, ref->owner_objectid,
					&ins, node->ref_mod);
			update_reserved_extents(root, node->bytenr,
						node->num_bytes, 0);
		} else {
			/* just add one backref */
			ret = add_extent_ref(trans, root, node->bytenr,
				   node->num_bytes,
				   node->parent, ref->root, ref->generation,
				   ref->owner_objectid, node->ref_mod);
		}
	} else if (ref->action == BTRFS_DROP_DELAYED_REF) {
		WARN_ON(insert_reserved);
		ret = drop_delayed_ref(trans, root, node);
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;

	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero when
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
		if (btrfs_delayed_node_to_ref(ref)->action == action)
		node = rb_prev(node);
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

		/* pick a new head ref from the cluster list */
		if (list_empty(cluster))

		locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

		/* grab the lock that says we are going to process
		 * all the refs for this head */
		ret = btrfs_delayed_ref_lock(trans, locked_ref);

		/*
		 * we may have dropped the spin lock to get the head
		 * mutex lock, and that might have given someone else
		 * time to free the head.  If that's true, it has been
		 * removed from our list and we can move on.
		 */
		if (ret == -EAGAIN) {

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;
			list_del_init(&locked_ref->cluster);

		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref,
					  must_insert_reserved);

		btrfs_put_delayed_ref(ref);

		spin_lock(&delayed_refs->lock);
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
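/*
 * Usage note (illustrative, not from the original source): a caller that
 * just wants to flush what is queued at this point typically does
 *
 *     btrfs_run_delayed_refs(trans, root, 0);
 *
 * while passing (unsigned long)-1 ("run_all" below) keeps going until the
 * delayed ref tree is completely drained, including entries added while
 * the run is in progress.
 */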
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int run_all = count == (unsigned long)-1;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);

	spin_lock(&delayed_refs->lock);
		count = delayed_refs->num_entries * 2;

	if (!(run_all || run_most) &&
	    delayed_refs->num_heads_ready < 64)

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);

		ret = run_clustered_refs(trans, root, &cluster);

		count -= min_t(unsigned long, ret, count);

		node = rb_first(&delayed_refs->root);
			count = (unsigned long)-1;

			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
			node = rb_next(node);
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);

	spin_unlock(&delayed_refs->lock);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 objectid, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref_item;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);

	if (path->slots[0] == 0)

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != bytenr ||
	    found_key.type != BTRFS_EXTENT_ITEM_KEY)

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr)

		if (found_key.type != BTRFS_EXTENT_REF_KEY) {

		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref);
		ref_root = btrfs_ref_root(leaf, ref_item);
		if ((ref_root != root->root_key.objectid &&
		     ref_root != BTRFS_TREE_LOG_OBJECTID) ||
		    objectid != btrfs_ref_objectid(leaf, ref_item)) {

		if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {

	btrfs_free_path(path);
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;

	if (!root->ref_cows)

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		root_gen = root->root_key.offset;
		root_gen = trans->transid - 1;

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);

			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		btrfs_free_leaf_ref(root, ref);
/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
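
/*
 * A minimal sketch of the sort record, reconstructed only from how it is
 * used below (sorted[i].bytenr and sorted[i].slot); the exact original
 * field types and layout may differ.
 */
struct refsort {
	u64 bytenr;	/* disk byte number of the referenced block/extent */
	u32 slot;	/* slot in the btree block that points at it */
};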
/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
	if (a->bytenr > b->bytenr)
noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *orig_buf,
			   struct extent_buffer *buf, u32 *nr_extents)
{
	u64 orig_generation;
	struct refsort *sorted;
	u32 nr_file_extents = 0;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	ref_generation = btrfs_header_generation(buf);
	orig_root = btrfs_header_owner(orig_buf);
	orig_generation = btrfs_header_generation(orig_buf);

	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);

	if (root->ref_cows) {
		process_func = __btrfs_inc_extent_ref;
		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
		process_func = __btrfs_update_extent_ref;

	/*
	 * we make two passes through the items.  In the first pass we
	 * only record the byte number and slot.  Then we sort based on
	 * byte number and do the actual work based on the sorted results
	 */
	for (i = 0; i < nritems; i++) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);

			sorted[refi].bytenr = bytenr;
			sorted[refi].slot = i;
			bytenr = btrfs_node_blockptr(buf, i);
			sorted[refi].bytenr = bytenr;
			sorted[refi].slot = i;

	/*
	 * if refi == 0, we didn't actually put anything into the sorted
	 * array and we're done
	 */
	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);

	for (i = 0; i < refi; i++) {
		slot = sorted[i].slot;
		bytenr = sorted[i].bytenr;

			btrfs_item_key_to_cpu(buf, &key, slot);
			fi = btrfs_item_ptr(buf, slot,
					    struct btrfs_file_extent_item);

			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);

			ret = process_func(trans, root, bytenr,
				   btrfs_file_extent_disk_num_bytes(buf, fi),
				   orig_buf->start, buf->start,
				   orig_root, ref_root,
				   orig_generation, ref_generation,
			ret = process_func(trans, root, bytenr, buf->len,
					   orig_buf->start, buf->start,
					   orig_root, ref_root,
					   orig_generation, ref_generation,

		*nr_extents = nr_file_extents;
		*nr_extents = nritems;
int btrfs_update_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root, struct extent_buffer *orig_buf,
		     struct extent_buffer *buf, int start_slot, int nr)
{
	u64 orig_generation;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;

	BUG_ON(start_slot < 0);
	BUG_ON(start_slot + nr > btrfs_header_nritems(buf));

	ref_root = btrfs_header_owner(buf);
	ref_generation = btrfs_header_generation(buf);
	orig_root = btrfs_header_owner(orig_buf);
	orig_generation = btrfs_header_generation(orig_buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows) {
		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)

	for (i = 0, slot = start_slot; i < nr; i++, slot++) {
			btrfs_item_key_to_cpu(buf, &key, slot);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			fi = btrfs_item_ptr(buf, slot,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);

			ret = __btrfs_update_extent_ref(trans, root, bytenr,
				    btrfs_file_extent_disk_num_bytes(buf, fi),
				    orig_buf->start, buf->start,
				    orig_root, ref_root, orig_generation,
				    ref_generation, key.objectid);
			bytenr = btrfs_node_blockptr(buf, slot);
			ret = __btrfs_update_extent_ref(trans, root, bytenr,
					    buf->len, orig_buf->start,
					    buf->start, orig_root, ref_root,
					    orig_generation, ref_generation,
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache, *entry;
	struct btrfs_path *path;

	path = btrfs_alloc_path();

		spin_lock(&root->fs_info->block_group_cache_lock);
		for (n = rb_first(&root->fs_info->block_group_cache_tree);
		     n; n = rb_next(n)) {
			entry = rb_entry(n, struct btrfs_block_group_cache,
					 cache_node);
		spin_unlock(&root->fs_info->block_group_cache_lock);

		last += cache->key.offset;

		err = write_one_cache_group(trans, root,
					    path, cache);
		/*
		 * if we fail to write the cache group, we want
		 * to keep it marked dirty in hopes that a later
		 */

	btrfs_free_path(path);
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
	btrfs_put_block_group(block_group);
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		spin_unlock(&found->lock);
		*space_info = found;

	found = kzalloc(sizeof(*found), GFP_NOFS);

	INIT_LIST_HEAD(&found->block_groups);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_delalloc = 0;
	found->force_alloc = 0;
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
		cache->space_info->bytes_readonly += cache->key.offset -
					btrfs_block_group_used(&cache->item);
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
{
	struct btrfs_fs_info *info = root->fs_info;

		alloc_profile = info->avail_data_alloc_bits &
				info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
				info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
		alloc_profile = info->avail_metadata_alloc_bits &
				info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;

	return btrfs_reduce_alloc_profile(root, data);
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	alloc_target = btrfs_get_alloc_profile(root, 1);
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       alloc_target);
/*
 * for now this just makes sure we have at least 5% of our metadata space free
 */
int btrfs_check_metadata_free_space(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *meta_sinfo;
	u64 alloc_target, thresh;
	int committed = 0, ret;

	/* get the space info for where the metadata will live */
	alloc_target = btrfs_get_alloc_profile(root, 0);
	meta_sinfo = __find_space_info(info, alloc_target);

	spin_lock(&meta_sinfo->lock);
	if (!meta_sinfo->full)
		thresh = meta_sinfo->total_bytes * 80;
	else
		thresh = meta_sinfo->total_bytes * 95;

	do_div(thresh, 100);

	if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
	    meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
		struct btrfs_trans_handle *trans;
		if (!meta_sinfo->full) {
			meta_sinfo->force_alloc = 1;
			spin_unlock(&meta_sinfo->lock);

			trans = btrfs_start_transaction(root, 1);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     2 * 1024 * 1024, alloc_target, 0);
			btrfs_end_transaction(trans, root);
		spin_unlock(&meta_sinfo->lock);

			trans = btrfs_join_transaction(root, 1);
			ret = btrfs_commit_transaction(trans, root);

	spin_unlock(&meta_sinfo->lock);
/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
				u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	int ret = 0, committed = 0;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;

	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	if (data_sinfo->total_bytes - data_sinfo->bytes_used -
	    data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
	    data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
	    data_sinfo->bytes_may_use < bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			data_sinfo->force_alloc = 1;
			spin_unlock(&data_sinfo->lock);

			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_start_transaction(root, 1);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target, 0);
			btrfs_end_transaction(trans, root);
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
			trans = btrfs_join_transaction(root, 1);
			ret = btrfs_commit_transaction(trans, root);

		printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
		       ", %llu bytes_used, %llu bytes_reserved, "
		       "%llu bytes_pinned, %llu bytes_readonly, %llu may use, "
		       "%llu total\n", (unsigned long long)bytes,
		       (unsigned long long)data_sinfo->bytes_delalloc,
		       (unsigned long long)data_sinfo->bytes_used,
		       (unsigned long long)data_sinfo->bytes_reserved,
		       (unsigned long long)data_sinfo->bytes_pinned,
		       (unsigned long long)data_sinfo->bytes_readonly,
		       (unsigned long long)data_sinfo->bytes_may_use,
		       (unsigned long long)data_sinfo->total_bytes);
	data_sinfo->bytes_may_use += bytes;
	BTRFS_I(inode)->reserved_bytes += bytes;
	spin_unlock(&data_sinfo->lock);

	return btrfs_check_metadata_free_space(root);
/*
 * if there was an error for whatever reason after calling
 * btrfs_check_data_free_space, call this so we can cleanup the counters.
 */
void btrfs_free_reserved_data_space(struct btrfs_root *root,
				    struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
/* called when we are adding a delalloc extent to the inode's io_tree */
void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
				  u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* get the space info for where this inode will be storing its data */
	data_sinfo = BTRFS_I(inode)->space_info;

	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_delalloc += bytes;

	/*
	 * we are adding a delalloc extent without calling
	 * btrfs_check_data_free_space first.  This happens on a weird
	 * writepage condition, but shouldn't hurt our accounting
	 */
	if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
		data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
		BTRFS_I(inode)->reserved_bytes = 0;
		data_sinfo->bytes_may_use -= bytes;
		BTRFS_I(inode)->reserved_bytes -= bytes;

	spin_unlock(&data_sinfo->lock);
/* called when we are clearing a delalloc extent from the inode's io_tree */
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
			       u64 bytes)
{
	struct btrfs_space_info *info;

	info = BTRFS_I(inode)->space_info;

	spin_lock(&info->lock);
	info->bytes_delalloc -= bytes;
	spin_unlock(&info->lock);
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = 1;
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;

	mutex_lock(&fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc) {
		space_info->force_alloc = 0;
	if (space_info->full) {
		spin_unlock(&space_info->lock);

	thresh = space_info->total_bytes - space_info->bytes_readonly;
	thresh = div_factor(thresh, 6);
	    (space_info->bytes_used + space_info->bytes_pinned +
	     space_info->bytes_reserved + alloc_bytes) < thresh) {
		spin_unlock(&space_info->lock);
	spin_unlock(&space_info->lock);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
		space_info->full = 1;
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;

		cache = btrfs_lookup_block_group(info, bytenr);
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
				cache->space_info->bytes_readonly -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
				cache->space_info->bytes_readonly += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

				ret = btrfs_discard_extent(root, bytenr,
							   num_bytes);
				ret = btrfs_add_free_space(cache, bytenr,
							   num_bytes);
		btrfs_put_block_group(cache);
		bytenr += num_bytes;
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);
int btrfs_update_pinned_extents(struct btrfs_root *root,
				u64 bytenr, u64 num, int pin)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

		set_extent_dirty(&fs_info->pinned_extents,
				 bytenr, bytenr + num - 1, GFP_NOFS);
		clear_extent_dirty(&fs_info->pinned_extents,
				   bytenr, bytenr + num - 1, GFP_NOFS);

		cache = btrfs_lookup_block_group(fs_info, bytenr);
		len = min(num, cache->key.offset -
			  (bytenr - cache->key.objectid));
			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			cache->pinned += len;
			cache->space_info->bytes_pinned += len;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fs_info->total_pinned += len;
			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			cache->pinned -= len;
			cache->space_info->bytes_pinned -= len;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fs_info->total_pinned -= len;
				btrfs_add_free_space(cache, bytenr, len);
		btrfs_put_block_group(cache);
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

		cache = btrfs_lookup_block_group(fs_info, bytenr);
		len = min(num, cache->key.offset -
			  (bytenr - cache->key.objectid));

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
			cache->reserved += len;
			cache->space_info->bytes_reserved += len;
			cache->reserved -= len;
			cache->space_info->bytes_reserved -= len;
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
		btrfs_put_block_group(cache);
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
	struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;

		ret = find_first_extent_bit(pinned_extents, last,
					    &start, &end, EXTENT_DIRTY);
		set_extent_dirty(copy, start, end, GFP_NOFS);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
{
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);

		ret = btrfs_discard_extent(root, start, end + 1 - start);

		/* unlocks the pinned mutex */
		btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end, GFP_NOFS);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  u64 bytenr, u64 num_bytes, int is_data,
			  struct extent_buffer **must_clean)
{
	struct extent_buffer *buf;

	buf = btrfs_find_tree_block(root, bytenr, num_bytes);

	/* we can reuse a block if it hasn't been written
	 * and it is from this transaction.  We can't
	 * reuse anything from the tree log root because
	 * it has tiny sub-transactions.
	 */
	if (btrfs_buffer_uptodate(buf, 0) &&
	    btrfs_try_tree_lock(buf)) {
		u64 header_owner = btrfs_header_owner(buf);
		u64 header_transid = btrfs_header_generation(buf);
		if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
		    header_owner != BTRFS_TREE_RELOC_OBJECTID &&
		    header_owner != BTRFS_DATA_RELOC_TREE_OBJECTID &&
		    header_transid == trans->transid &&
		    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);

	btrfs_set_path_blocking(path);
	/* unlocks the pinned mutex */
	btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner_objectid, int pin, int mark_free,
			 int refs_to_drop)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	int extent_slot = 0;
	int found_extent = 0;
	struct btrfs_extent_item *ei;

	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	path = btrfs_alloc_path();

	path->leave_spinning = 1;
	ret = lookup_extent_backref(trans, extent_root, path,
				    bytenr, parent, root_objectid,
				    ref_generation, owner_objectid, 1);
		struct btrfs_key found_key;
		extent_slot = path->slots[0];
		while (extent_slot > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      extent_slot);
			if (found_key.objectid != bytenr)
			if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
			    found_key.offset == num_bytes) {
			if (path->slots[0] - extent_slot > 5)
		if (!found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    refs_to_drop);
			btrfs_release_path(extent_root, path);
			path->leave_spinning = 1;
			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			extent_slot = path->slots[0];
		btrfs_print_leaf(extent_root, path->nodes[0]);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu gen %llu owner %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)ref_generation,
		       (unsigned long long)owner_objectid);

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);

	/*
	 * we're not allowed to delete the extent item if there
	 * are other delayed ref updates pending
	 */
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;
	btrfs_set_extent_refs(leaf, ei, refs);
	btrfs_mark_buffer_dirty(leaf);

	if (refs == 0 && found_extent &&
	    path->slots[0] == extent_slot + 1) {
		struct btrfs_extent_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		BUG_ON(btrfs_ref_num_refs(leaf, ref) != refs_to_drop);
		/* if the back ref and the extent are next to each other
		 * they get deleted below in one shot
		 */
		path->slots[0] = extent_slot;
	} else if (found_extent) {
		/* otherwise delete the extent back ref */
		ret = remove_extent_backref(trans, extent_root, path,
					    refs_to_drop);
		/* if refs are 0, we need to setup the path for deletion */
			btrfs_release_path(extent_root, path);
			path->leave_spinning = 1;
			ret = btrfs_search_slot(trans, extent_root, &key, path,
						-1, 1);

		struct extent_buffer *must_clean = NULL;

		ret = pin_down_bytes(trans, root, path,
				     bytenr, num_bytes,
				     owner_objectid >= BTRFS_FIRST_FREE_OBJECTID,
				     &must_clean);

		/* block accounting for super block */
		spin_lock(&info->delalloc_lock);
		super_used = btrfs_super_bytes_used(&info->super_copy);
		btrfs_set_super_bytes_used(&info->super_copy,
					   super_used - num_bytes);

		/* block accounting for root item */
		root_used = btrfs_root_used(&root->root_item);
		btrfs_set_root_used(&root->root_item,
				    root_used - num_bytes);
		spin_unlock(&info->delalloc_lock);

		/*
		 * it is going to be very rare for someone to be waiting
		 * on the block we're freeing.  del_items might need to
		 * schedule, so rather than get fancy, just force it
		 */
			btrfs_set_lock_blocking(must_clean);

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],

		btrfs_release_path(extent_root, path);

			clean_tree_block(NULL, root, must_clean);
			btrfs_tree_unlock(must_clean);
			free_extent_buffer(must_clean);

		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			invalidate_mapping_pages(info->btree_inode->i_mapping,
				bytenr >> PAGE_CACHE_SHIFT,
				(bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);

		ret = update_block_group(trans, root, bytenr, num_bytes, 0,
					 mark_free);

	btrfs_free_path(path);
/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 ref_generation,
			       u64 owner_objectid, int pin,
			       int refs_to_drop)
{
	WARN_ON(num_bytes < root->sectorsize);

	/*
	 * if metadata always pin
	 * if data pin when any transaction has committed this
	 */
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID ||
	    ref_generation != trans->transid)

	if (ref_generation != trans->transid)

	return __free_extent(trans, root, bytenr, num_bytes, parent,
			     root_objectid, ref_generation,
			     owner_objectid, pin, pin == 0, refs_to_drop);
/*
 * when we free an extent, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
				  &head->node, head->must_insert_reserved);
	BUG_ON(ret);
	btrfs_put_delayed_ref(&head->node);
	return 0;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
2519 int btrfs_free_extent(struct btrfs_trans_handle
*trans
,
2520 struct btrfs_root
*root
,
2521 u64 bytenr
, u64 num_bytes
, u64 parent
,
2522 u64 root_objectid
, u64 ref_generation
,
2523 u64 owner_objectid
, int pin
)
2528 * tree log blocks never actually go into the extent allocation
2529 * tree, just update pinning info and exit early.
2531 * data extents referenced by the tree log do need to have
2532 * their reference counts bumped.
2534 if (root
->root_key
.objectid
== BTRFS_TREE_LOG_OBJECTID
&&
2535 owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
) {
2536 /* unlocks the pinned mutex */
2537 btrfs_update_pinned_extents(root
, bytenr
, num_bytes
, 1);
2538 update_reserved_extents(root
, bytenr
, num_bytes
, 0);
2541 ret
= btrfs_add_delayed_ref(trans
, bytenr
, num_bytes
, parent
,
2542 root_objectid
, ref_generation
,
2544 BTRFS_DROP_DELAYED_REF
, 1);
2546 ret
= check_ref_cleanup(trans
, root
, bytenr
);
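/*
 * round an allocation start up to the next multiple of root->stripesize
 * (the mask arithmetic below assumes the stripe size is a power of two)
 */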
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 exclude_start, u64 exclude_nr,
				     int data)
{
2576 struct btrfs_root
*root
= orig_root
->fs_info
->extent_root
;
2577 struct btrfs_free_cluster
*last_ptr
= NULL
;
2578 struct btrfs_block_group_cache
*block_group
= NULL
;
2579 int empty_cluster
= 2 * 1024 * 1024;
2580 int allowed_chunk_alloc
= 0;
2581 struct btrfs_space_info
*space_info
;
2582 int last_ptr_loop
= 0;
2585 WARN_ON(num_bytes
< root
->sectorsize
);
2586 btrfs_set_key_type(ins
, BTRFS_EXTENT_ITEM_KEY
);
2590 space_info
= __find_space_info(root
->fs_info
, data
);
2592 if (orig_root
->ref_cows
|| empty_size
)
2593 allowed_chunk_alloc
= 1;
2595 if (data
& BTRFS_BLOCK_GROUP_METADATA
) {
2596 last_ptr
= &root
->fs_info
->meta_alloc_cluster
;
2597 if (!btrfs_test_opt(root
, SSD
))
2598 empty_cluster
= 64 * 1024;
2601 if ((data
& BTRFS_BLOCK_GROUP_DATA
) && btrfs_test_opt(root
, SSD
)) {
2602 last_ptr
= &root
->fs_info
->data_alloc_cluster
;
2606 spin_lock(&last_ptr
->lock
);
2607 if (last_ptr
->block_group
)
2608 hint_byte
= last_ptr
->window_start
;
2609 spin_unlock(&last_ptr
->lock
);
2612 search_start
= max(search_start
, first_logical_byte(root
, 0));
2613 search_start
= max(search_start
, hint_byte
);
2620 if (search_start
== hint_byte
) {
2621 block_group
= btrfs_lookup_block_group(root
->fs_info
,
2623 if (block_group
&& block_group_bits(block_group
, data
)) {
2624 down_read(&space_info
->groups_sem
);
2625 goto have_block_group
;
2626 } else if (block_group
) {
2627 btrfs_put_block_group(block_group
);
2632 down_read(&space_info
->groups_sem
);
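	/*
	 * walk every block group in this space_info, trying the clustered
	 * allocator first (when a cluster is in use) and falling back to a
	 * plain free space search
	 */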
2633 list_for_each_entry(block_group
, &space_info
->block_groups
, list
) {
2636 atomic_inc(&block_group
->count
);
2637 search_start
= block_group
->key
.objectid
;
2640 if (unlikely(!block_group
->cached
)) {
2641 mutex_lock(&block_group
->cache_mutex
);
2642 ret
= cache_block_group(root
, block_group
);
2643 mutex_unlock(&block_group
->cache_mutex
);
2645 btrfs_put_block_group(block_group
);
2650 if (unlikely(block_group
->ro
))
2655 * the refill lock keeps out other
2656 * people trying to start a new cluster
2658 spin_lock(&last_ptr
->refill_lock
);
2659 offset
= btrfs_alloc_from_cluster(block_group
, last_ptr
,
2660 num_bytes
, search_start
);
2662 /* we have a block, we're done */
2663 spin_unlock(&last_ptr
->refill_lock
);
2667 spin_lock(&last_ptr
->lock
);
2669 * whoops, this cluster doesn't actually point to
2670 * this block group. Get a ref on the block
 * group it does point to and try again
2673 if (!last_ptr_loop
&& last_ptr
->block_group
&&
2674 last_ptr
->block_group
!= block_group
) {
2676 btrfs_put_block_group(block_group
);
2677 block_group
= last_ptr
->block_group
;
2678 atomic_inc(&block_group
->count
);
2679 spin_unlock(&last_ptr
->lock
);
2680 spin_unlock(&last_ptr
->refill_lock
);
2683 search_start
= block_group
->key
.objectid
;
2684 goto have_block_group
;
2686 spin_unlock(&last_ptr
->lock
);
2689 * this cluster didn't work out, free it and
2692 btrfs_return_cluster_to_free_space(NULL
, last_ptr
);
2696 /* allocate a cluster in this block group */
2697 ret
= btrfs_find_space_cluster(trans
,
2698 block_group
, last_ptr
,
2700 empty_cluster
+ empty_size
);
2703 * now pull our allocation out of this
2706 offset
= btrfs_alloc_from_cluster(block_group
,
2707 last_ptr
, num_bytes
,
2710 /* we found one, proceed */
2711 spin_unlock(&last_ptr
->refill_lock
);
2716 * at this point we either didn't find a cluster
2717 * or we weren't able to allocate a block from our
2718 * cluster. Free the cluster we've been trying
2719 * to use, and go to the next block group
2722 btrfs_return_cluster_to_free_space(NULL
,
2724 spin_unlock(&last_ptr
->refill_lock
);
2727 spin_unlock(&last_ptr
->refill_lock
);
2730 offset
= btrfs_find_space_for_alloc(block_group
, search_start
,
2731 num_bytes
, empty_size
);
2735 search_start
= stripe_align(root
, offset
);
2737 /* move on to the next group */
2738 if (search_start
+ num_bytes
>= search_end
) {
2739 btrfs_add_free_space(block_group
, offset
, num_bytes
);
2743 /* move on to the next group */
2744 if (search_start
+ num_bytes
>
2745 block_group
->key
.objectid
+ block_group
->key
.offset
) {
2746 btrfs_add_free_space(block_group
, offset
, num_bytes
);
2750 if (exclude_nr
> 0 &&
2751 (search_start
+ num_bytes
> exclude_start
&&
2752 search_start
< exclude_start
+ exclude_nr
)) {
2753 search_start
= exclude_start
+ exclude_nr
;
2755 btrfs_add_free_space(block_group
, offset
, num_bytes
);
2757 * if search_start is still in this block group
2758 * then we just re-search this block group
2760 if (search_start
>= block_group
->key
.objectid
&&
2761 search_start
< (block_group
->key
.objectid
+
2762 block_group
->key
.offset
))
2763 goto have_block_group
;
2767 ins
->objectid
= search_start
;
2768 ins
->offset
= num_bytes
;
2770 if (offset
< search_start
)
2771 btrfs_add_free_space(block_group
, offset
,
2772 search_start
- offset
);
2773 BUG_ON(offset
> search_start
);
2775 /* we are all good, lets return */
2778 btrfs_put_block_group(block_group
);
2780 up_read(&space_info
->groups_sem
);
2782 /* loop == 0, try to find a clustered alloc in every block group
2783 * loop == 1, try again after forcing a chunk allocation
2784 * loop == 2, set empty_size and empty_cluster to 0 and try again
2786 if (!ins
->objectid
&& loop
< 3 &&
2787 (empty_size
|| empty_cluster
|| allowed_chunk_alloc
)) {
2793 if (allowed_chunk_alloc
) {
2794 ret
= do_chunk_alloc(trans
, root
, num_bytes
+
2795 2 * 1024 * 1024, data
, 1);
2796 allowed_chunk_alloc
= 0;
2798 space_info
->force_alloc
= 1;
2806 } else if (!ins
->objectid
) {
2810 /* we found what we needed */
2811 if (ins
->objectid
) {
2812 if (!(data
& BTRFS_BLOCK_GROUP_DATA
))
2813 trans
->block_group
= block_group
->key
.objectid
;
2815 btrfs_put_block_group(block_group
);
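/*
 * debugging helper: when an allocation fails, print the space_info
 * counters and the free space of every block group to the kernel log
 */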
static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
{
	struct btrfs_block_group_cache *cache;

	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
	       " may_use=%llu, used=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_delalloc,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_used);

	down_read(&info->groups_sem);
	list_for_each_entry(cache, &info->block_groups, list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	up_read(&info->groups_sem);
}
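/*
 * reserve space for an extent: pre-allocate chunks when allowed, then
 * search the block groups with find_free_extent.  On ENOSPC the request
 * is halved (but kept >= min_alloc_size) and another chunk allocation is
 * attempted; a final failure is logged via dump_space_info.
 */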
2854 static int __btrfs_reserve_extent(struct btrfs_trans_handle
*trans
,
2855 struct btrfs_root
*root
,
2856 u64 num_bytes
, u64 min_alloc_size
,
2857 u64 empty_size
, u64 hint_byte
,
2858 u64 search_end
, struct btrfs_key
*ins
,
2862 u64 search_start
= 0;
2863 struct btrfs_fs_info
*info
= root
->fs_info
;
2865 data
= btrfs_get_alloc_profile(root
, data
);
2868 * the only place that sets empty_size is btrfs_realloc_node, which
2869 * is not called recursively on allocations
2871 if (empty_size
|| root
->ref_cows
) {
2872 if (!(data
& BTRFS_BLOCK_GROUP_METADATA
)) {
2873 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2875 BTRFS_BLOCK_GROUP_METADATA
|
2876 (info
->metadata_alloc_profile
&
2877 info
->avail_metadata_alloc_bits
), 0);
2879 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2880 num_bytes
+ 2 * 1024 * 1024, data
, 0);
2883 WARN_ON(num_bytes
< root
->sectorsize
);
2884 ret
= find_free_extent(trans
, root
, num_bytes
, empty_size
,
2885 search_start
, search_end
, hint_byte
, ins
,
2886 trans
->alloc_exclude_start
,
2887 trans
->alloc_exclude_nr
, data
);
2889 if (ret
== -ENOSPC
&& num_bytes
> min_alloc_size
) {
2890 num_bytes
= num_bytes
>> 1;
2891 num_bytes
= num_bytes
& ~(root
->sectorsize
- 1);
2892 num_bytes
= max(num_bytes
, min_alloc_size
);
2893 do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2894 num_bytes
, data
, 1);
2898 struct btrfs_space_info
*sinfo
;
2900 sinfo
= __find_space_info(root
->fs_info
, data
);
2901 printk(KERN_ERR
"btrfs allocation failed flags %llu, "
2902 "wanted %llu\n", (unsigned long long)data
,
2903 (unsigned long long)num_bytes
);
2904 dump_space_info(sinfo
, num_bytes
);
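/*
 * give back a reserved extent that was never turned into a real
 * allocation: discard it, return the space to the free space cache and
 * clear the reserved accounting
 */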
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	ret = btrfs_discard_extent(root, start, len);

	btrfs_add_free_space(cache, start, len);
	btrfs_put_block_group(cache);
	update_reserved_extents(root, start, len, 0);

	return ret;
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;

	ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
				     empty_size, hint_byte, search_end, ins,
				     data);
	update_reserved_extents(root, ins->objectid, ins->offset, 1);
	return ret;
}
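/*
 * turn a reserved extent into a real allocation: insert the extent item
 * and its first backref, and update the super block, root item and block
 * group byte accounting
 */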
2947 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle
*trans
,
2948 struct btrfs_root
*root
, u64 parent
,
2949 u64 root_objectid
, u64 ref_generation
,
2950 u64 owner
, struct btrfs_key
*ins
,
2956 u64 num_bytes
= ins
->offset
;
2958 struct btrfs_fs_info
*info
= root
->fs_info
;
2959 struct btrfs_root
*extent_root
= info
->extent_root
;
2960 struct btrfs_extent_item
*extent_item
;
2961 struct btrfs_extent_ref
*ref
;
2962 struct btrfs_path
*path
;
2963 struct btrfs_key keys
[2];
2966 parent
= ins
->objectid
;
2968 /* block accounting for super block */
2969 spin_lock(&info
->delalloc_lock
);
2970 super_used
= btrfs_super_bytes_used(&info
->super_copy
);
2971 btrfs_set_super_bytes_used(&info
->super_copy
, super_used
+ num_bytes
);
2973 /* block accounting for root item */
2974 root_used
= btrfs_root_used(&root
->root_item
);
2975 btrfs_set_root_used(&root
->root_item
, root_used
+ num_bytes
);
2976 spin_unlock(&info
->delalloc_lock
);
2978 memcpy(&keys
[0], ins
, sizeof(*ins
));
2979 keys
[1].objectid
= ins
->objectid
;
2980 keys
[1].type
= BTRFS_EXTENT_REF_KEY
;
2981 keys
[1].offset
= parent
;
2982 sizes
[0] = sizeof(*extent_item
);
2983 sizes
[1] = sizeof(*ref
);
2985 path
= btrfs_alloc_path();
2988 path
->leave_spinning
= 1;
2989 ret
= btrfs_insert_empty_items(trans
, extent_root
, path
, keys
,
2993 extent_item
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
2994 struct btrfs_extent_item
);
2995 btrfs_set_extent_refs(path
->nodes
[0], extent_item
, ref_mod
);
2996 ref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0] + 1,
2997 struct btrfs_extent_ref
);
2999 btrfs_set_ref_root(path
->nodes
[0], ref
, root_objectid
);
3000 btrfs_set_ref_generation(path
->nodes
[0], ref
, ref_generation
);
3001 btrfs_set_ref_objectid(path
->nodes
[0], ref
, owner
);
3002 btrfs_set_ref_num_refs(path
->nodes
[0], ref
, ref_mod
);
3004 btrfs_mark_buffer_dirty(path
->nodes
[0]);
3006 trans
->alloc_exclude_start
= 0;
3007 trans
->alloc_exclude_nr
= 0;
3008 btrfs_free_path(path
);
3013 ret
= update_block_group(trans
, root
, ins
->objectid
,
3016 printk(KERN_ERR
"btrfs update block group failed for %llu "
3017 "%llu\n", (unsigned long long)ins
->objectid
,
3018 (unsigned long long)ins
->offset
);
int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 parent,
				u64 root_objectid, u64 ref_generation,
				u64 owner, struct btrfs_key *ins)
{
	int ret;

	if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
		return 0;

	ret = btrfs_add_delayed_ref(trans, ins->objectid,
				    ins->offset, parent, root_objectid,
				    ref_generation, owner,
				    BTRFS_ADD_DELAYED_EXTENT, 0);
	return ret;
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 parent,
			      u64 root_objectid, u64 ref_generation,
			      u64 owner, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	mutex_lock(&block_group->cache_mutex);
	cache_block_group(root, block_group);
	mutex_unlock(&block_group->cache_mutex);

	ret = btrfs_remove_free_space(block_group, ins->objectid,
				      ins->offset);
	BUG_ON(ret);
	btrfs_put_block_group(block_group);
	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
					    ref_generation, owner, ins, 1);
	return ret;
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       u64 num_bytes, u64 parent, u64 min_alloc_size,
		       u64 root_objectid, u64 ref_generation,
		       u64 owner_objectid, u64 empty_size, u64 hint_byte,
		       u64 search_end, struct btrfs_key *ins, u64 data)
{
	int ret;

	ret = __btrfs_reserve_extent(trans, root, num_bytes,
				     min_alloc_size, empty_size, hint_byte,
				     search_end, ins, data);
	BUG_ON(ret);
	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_ref(trans, ins->objectid,
					    ins->offset, parent, root_objectid,
					    ref_generation, owner_objectid,
					    BTRFS_ADD_DELAYED_EXTENT, 0);
		BUG_ON(ret);
	}
	update_reserved_extents(root, ins->objectid, ins->offset, 1);
	return ret;
}
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		set_extent_dirty(&root->dirty_log_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
3131 * helper function to allocate a block for a given tree
3132 * returns the tree buffer or NULL.
3134 struct extent_buffer
*btrfs_alloc_free_block(struct btrfs_trans_handle
*trans
,
3135 struct btrfs_root
*root
,
3136 u32 blocksize
, u64 parent
,
3143 struct btrfs_key ins
;
3145 struct extent_buffer
*buf
;
3147 ret
= btrfs_alloc_extent(trans
, root
, blocksize
, parent
, blocksize
,
3148 root_objectid
, ref_generation
, level
,
3149 empty_size
, hint
, (u64
)-1, &ins
, 0);
3152 return ERR_PTR(ret
);
3155 buf
= btrfs_init_new_buffer(trans
, root
, ins
.objectid
,
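/*
 * drop the file extent references held by one leaf: collect the
 * referenced bytenrs, sort them, then free each extent so the extent
 * tree is modified in ascending order rather than at random
 */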
3160 int btrfs_drop_leaf_ref(struct btrfs_trans_handle
*trans
,
3161 struct btrfs_root
*root
, struct extent_buffer
*leaf
)
3164 u64 leaf_generation
;
3165 struct refsort
*sorted
;
3166 struct btrfs_key key
;
3167 struct btrfs_file_extent_item
*fi
;
3174 BUG_ON(!btrfs_is_leaf(leaf
));
3175 nritems
= btrfs_header_nritems(leaf
);
3176 leaf_owner
= btrfs_header_owner(leaf
);
3177 leaf_generation
= btrfs_header_generation(leaf
);
3179 sorted
= kmalloc(sizeof(*sorted
) * nritems
, GFP_NOFS
);
3180 /* we do this loop twice. The first time we build a list
3181 * of the extents we have a reference on, then we sort the list
3182 * by bytenr. The second time around we actually do the
3185 for (i
= 0; i
< nritems
; i
++) {
3189 btrfs_item_key_to_cpu(leaf
, &key
, i
);
3191 /* only extents have references, skip everything else */
3192 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
3195 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
3197 /* inline extents live in the btree, they don't have refs */
3198 if (btrfs_file_extent_type(leaf
, fi
) ==
3199 BTRFS_FILE_EXTENT_INLINE
)
3202 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
3204 /* holes don't have refs */
3205 if (disk_bytenr
== 0)
3208 sorted
[refi
].bytenr
= disk_bytenr
;
3209 sorted
[refi
].slot
= i
;
3216 sort(sorted
, refi
, sizeof(struct refsort
), refsort_cmp
, NULL
);
3218 for (i
= 0; i
< refi
; i
++) {
3221 disk_bytenr
= sorted
[i
].bytenr
;
3222 slot
= sorted
[i
].slot
;
3226 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
3227 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
3230 fi
= btrfs_item_ptr(leaf
, slot
, struct btrfs_file_extent_item
);
3232 ret
= btrfs_free_extent(trans
, root
, disk_bytenr
,
3233 btrfs_file_extent_disk_num_bytes(leaf
, fi
),
3234 leaf
->start
, leaf_owner
, leaf_generation
,
3238 atomic_inc(&root
->fs_info
->throttle_gen
);
3239 wake_up(&root
->fs_info
->transaction_throttle
);
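/*
 * same idea as btrfs_drop_leaf_ref, but the extent list comes from a
 * cached leaf ref instead of reading the leaf itself
 */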
3247 static noinline
int cache_drop_leaf_ref(struct btrfs_trans_handle
*trans
,
3248 struct btrfs_root
*root
,
3249 struct btrfs_leaf_ref
*ref
)
3253 struct btrfs_extent_info
*info
;
3254 struct refsort
*sorted
;
3256 if (ref
->nritems
== 0)
3259 sorted
= kmalloc(sizeof(*sorted
) * ref
->nritems
, GFP_NOFS
);
3260 for (i
= 0; i
< ref
->nritems
; i
++) {
3261 sorted
[i
].bytenr
= ref
->extents
[i
].bytenr
;
3264 sort(sorted
, ref
->nritems
, sizeof(struct refsort
), refsort_cmp
, NULL
);
3267 * the items in the ref were sorted when the ref was inserted
3268 * into the ref cache, so this is already in order
3270 for (i
= 0; i
< ref
->nritems
; i
++) {
3271 info
= ref
->extents
+ sorted
[i
].slot
;
3272 ret
= btrfs_free_extent(trans
, root
, info
->bytenr
,
3273 info
->num_bytes
, ref
->bytenr
,
3274 ref
->owner
, ref
->generation
,
3277 atomic_inc(&root
->fs_info
->throttle_gen
);
3278 wake_up(&root
->fs_info
->transaction_throttle
);
3289 static int drop_snap_lookup_refcount(struct btrfs_trans_handle
*trans
,
3290 struct btrfs_root
*root
, u64 start
,
3295 ret
= btrfs_lookup_extent_ref(trans
, root
, start
, len
, refs
);
3298 #if 0 /* some debugging code in case we see problems here */
3299 /* if the refs count is one, it won't get increased again. But
3300 * if the ref count is > 1, someone may be decreasing it at
3301 * the same time we are.
3304 struct extent_buffer
*eb
= NULL
;
3305 eb
= btrfs_find_create_tree_block(root
, start
, len
);
3307 btrfs_tree_lock(eb
);
3309 mutex_lock(&root
->fs_info
->alloc_mutex
);
3310 ret
= lookup_extent_ref(NULL
, root
, start
, len
, refs
);
3312 mutex_unlock(&root
->fs_info
->alloc_mutex
);
3315 btrfs_tree_unlock(eb
);
3316 free_extent_buffer(eb
);
3319 printk(KERN_ERR
"btrfs block %llu went down to one "
3320 "during drop_snap\n", (unsigned long long)start
);
3331 * this is used while deleting old snapshots, and it drops the refs
3332 * on a whole subtree starting from a level 1 node.
3334 * The idea is to sort all the leaf pointers, and then drop the
3335 * ref on all the leaves in order. Most of the time the leaves
3336 * will have ref cache entries, so no leaf IOs will be required to
3337 * find the extents they have references on.
3339 * For each leaf, any references it has are also dropped in order
3341 * This ends up dropping the references in something close to optimal
3342 * order for reading and modifying the extent allocation tree.
3344 static noinline
int drop_level_one_refs(struct btrfs_trans_handle
*trans
,
3345 struct btrfs_root
*root
,
3346 struct btrfs_path
*path
)
3351 struct extent_buffer
*eb
= path
->nodes
[1];
3352 struct extent_buffer
*leaf
;
3353 struct btrfs_leaf_ref
*ref
;
3354 struct refsort
*sorted
= NULL
;
3355 int nritems
= btrfs_header_nritems(eb
);
3359 int slot
= path
->slots
[1];
3360 u32 blocksize
= btrfs_level_size(root
, 0);
3366 root_owner
= btrfs_header_owner(eb
);
3367 root_gen
= btrfs_header_generation(eb
);
3368 sorted
= kmalloc(sizeof(*sorted
) * nritems
, GFP_NOFS
);
3371 * step one, sort all the leaf pointers so we don't scribble
3372 * randomly into the extent allocation tree
3374 for (i
= slot
; i
< nritems
; i
++) {
3375 sorted
[refi
].bytenr
= btrfs_node_blockptr(eb
, i
);
3376 sorted
[refi
].slot
= i
;
3381 * nritems won't be zero, but if we're picking up drop_snapshot
3382 * after a crash, slot might be > 0, so double check things
3388 sort(sorted
, refi
, sizeof(struct refsort
), refsort_cmp
, NULL
);
3391 * the first loop frees everything the leaves point to
3393 for (i
= 0; i
< refi
; i
++) {
3396 bytenr
= sorted
[i
].bytenr
;
3399 * check the reference count on this leaf. If it is > 1
3400 * we just decrement it below and don't update any
3401 * of the refs the leaf points to.
3403 ret
= drop_snap_lookup_refcount(trans
, root
, bytenr
,
3409 ptr_gen
= btrfs_node_ptr_generation(eb
, sorted
[i
].slot
);
3412 * the leaf only had one reference, which means the
3413 * only thing pointing to this leaf is the snapshot
3414 * we're deleting. It isn't possible for the reference
3415 * count to increase again later
3417 * The reference cache is checked for the leaf,
3418 * and if found we'll be able to drop any refs held by
3419 * the leaf without needing to read it in.
3421 ref
= btrfs_lookup_leaf_ref(root
, bytenr
);
3422 if (ref
&& ref
->generation
!= ptr_gen
) {
3423 btrfs_free_leaf_ref(root
, ref
);
3427 ret
= cache_drop_leaf_ref(trans
, root
, ref
);
3429 btrfs_remove_leaf_ref(root
, ref
);
3430 btrfs_free_leaf_ref(root
, ref
);
3433 * the leaf wasn't in the reference cache, so
3434 * we have to read it.
3436 leaf
= read_tree_block(root
, bytenr
, blocksize
,
3438 ret
= btrfs_drop_leaf_ref(trans
, root
, leaf
);
3440 free_extent_buffer(leaf
);
3442 atomic_inc(&root
->fs_info
->throttle_gen
);
3443 wake_up(&root
->fs_info
->transaction_throttle
);
3448 * run through the loop again to free the refs on the leaves.
3449 * This is faster than doing it in the loop above because
3450 * the leaves are likely to be clustered together. We end up
3451 * working in nice chunks on the extent allocation tree.
3453 for (i
= 0; i
< refi
; i
++) {
3454 bytenr
= sorted
[i
].bytenr
;
3455 ret
= btrfs_free_extent(trans
, root
, bytenr
,
3456 blocksize
, eb
->start
,
3457 root_owner
, root_gen
, 0, 1);
3460 atomic_inc(&root
->fs_info
->throttle_gen
);
3461 wake_up(&root
->fs_info
->transaction_throttle
);
3468 * update the path to show we've processed the entire level 1
3469 * node. This will get saved into the root's drop_snapshot_progress
3470 * field so these drops are not repeated again if this transaction
3473 path
->slots
[1] = nritems
;
3478 * helper function for drop_snapshot, this walks down the tree dropping ref
3479 * counts as it goes.
3481 static noinline
int walk_down_tree(struct btrfs_trans_handle
*trans
,
3482 struct btrfs_root
*root
,
3483 struct btrfs_path
*path
, int *level
)
3489 struct extent_buffer
*next
;
3490 struct extent_buffer
*cur
;
3491 struct extent_buffer
*parent
;
3496 WARN_ON(*level
< 0);
3497 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
3498 ret
= drop_snap_lookup_refcount(trans
, root
, path
->nodes
[*level
]->start
,
3499 path
->nodes
[*level
]->len
, &refs
);
3505 * walk down to the last node level and free all the leaves
3507 while (*level
>= 0) {
3508 WARN_ON(*level
< 0);
3509 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
3510 cur
= path
->nodes
[*level
];
3512 if (btrfs_header_level(cur
) != *level
)
3515 if (path
->slots
[*level
] >=
3516 btrfs_header_nritems(cur
))
3519 /* the new code goes down to level 1 and does all the
3520 * leaves pointed to that node in bulk. So, this check
3521 * for level 0 will always be false.
3523 * But, the disk format allows the drop_snapshot_progress
3524 * field in the root to leave things in a state where
3525 * a leaf will need cleaning up here. If someone crashes
3526 * with the old code and then boots with the new code,
3527 * we might find a leaf here.
3530 ret
= btrfs_drop_leaf_ref(trans
, root
, cur
);
3536 * once we get to level one, process the whole node
3537 * at once, including everything below it.
3540 ret
= drop_level_one_refs(trans
, root
, path
);
3545 bytenr
= btrfs_node_blockptr(cur
, path
->slots
[*level
]);
3546 ptr_gen
= btrfs_node_ptr_generation(cur
, path
->slots
[*level
]);
3547 blocksize
= btrfs_level_size(root
, *level
- 1);
3549 ret
= drop_snap_lookup_refcount(trans
, root
, bytenr
,
3554 * if there is more than one reference, we don't need
3555 * to read that node to drop any references it has. We
3556 * just drop the ref we hold on that node and move on to the
3557 * next slot in this level.
3560 parent
= path
->nodes
[*level
];
3561 root_owner
= btrfs_header_owner(parent
);
3562 root_gen
= btrfs_header_generation(parent
);
3563 path
->slots
[*level
]++;
3565 ret
= btrfs_free_extent(trans
, root
, bytenr
,
3566 blocksize
, parent
->start
,
3567 root_owner
, root_gen
,
3571 atomic_inc(&root
->fs_info
->throttle_gen
);
3572 wake_up(&root
->fs_info
->transaction_throttle
);
3579 * we need to keep freeing things in the next level down.
3580 * read the block and loop around to process it
3582 next
= read_tree_block(root
, bytenr
, blocksize
, ptr_gen
);
3583 WARN_ON(*level
<= 0);
3584 if (path
->nodes
[*level
-1])
3585 free_extent_buffer(path
->nodes
[*level
-1]);
3586 path
->nodes
[*level
-1] = next
;
3587 *level
= btrfs_header_level(next
);
3588 path
->slots
[*level
] = 0;
3592 WARN_ON(*level
< 0);
3593 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
3595 if (path
->nodes
[*level
] == root
->node
) {
3596 parent
= path
->nodes
[*level
];
3597 bytenr
= path
->nodes
[*level
]->start
;
3599 parent
= path
->nodes
[*level
+ 1];
3600 bytenr
= btrfs_node_blockptr(parent
, path
->slots
[*level
+ 1]);
3603 blocksize
= btrfs_level_size(root
, *level
);
3604 root_owner
= btrfs_header_owner(parent
);
3605 root_gen
= btrfs_header_generation(parent
);
3608 * cleanup and free the reference on the last node
3611 ret
= btrfs_free_extent(trans
, root
, bytenr
, blocksize
,
3612 parent
->start
, root_owner
, root_gen
,
3614 free_extent_buffer(path
->nodes
[*level
]);
3615 path
->nodes
[*level
] = NULL
;
3625 * helper function for drop_subtree, this function is similar to
3626 * walk_down_tree. The main difference is that it checks reference
3627 * counts while tree blocks are locked.
3629 static noinline
int walk_down_subtree(struct btrfs_trans_handle
*trans
,
3630 struct btrfs_root
*root
,
3631 struct btrfs_path
*path
, int *level
)
3633 struct extent_buffer
*next
;
3634 struct extent_buffer
*cur
;
3635 struct extent_buffer
*parent
;
3642 cur
= path
->nodes
[*level
];
3643 ret
= btrfs_lookup_extent_ref(trans
, root
, cur
->start
, cur
->len
,
3649 while (*level
>= 0) {
3650 cur
= path
->nodes
[*level
];
3652 ret
= btrfs_drop_leaf_ref(trans
, root
, cur
);
3654 clean_tree_block(trans
, root
, cur
);
3657 if (path
->slots
[*level
] >= btrfs_header_nritems(cur
)) {
3658 clean_tree_block(trans
, root
, cur
);
3662 bytenr
= btrfs_node_blockptr(cur
, path
->slots
[*level
]);
3663 blocksize
= btrfs_level_size(root
, *level
- 1);
3664 ptr_gen
= btrfs_node_ptr_generation(cur
, path
->slots
[*level
]);
3666 next
= read_tree_block(root
, bytenr
, blocksize
, ptr_gen
);
3667 btrfs_tree_lock(next
);
3668 btrfs_set_lock_blocking(next
);
3670 ret
= btrfs_lookup_extent_ref(trans
, root
, bytenr
, blocksize
,
3674 parent
= path
->nodes
[*level
];
3675 ret
= btrfs_free_extent(trans
, root
, bytenr
,
3676 blocksize
, parent
->start
,
3677 btrfs_header_owner(parent
),
3678 btrfs_header_generation(parent
),
3681 path
->slots
[*level
]++;
3682 btrfs_tree_unlock(next
);
3683 free_extent_buffer(next
);
3687 *level
= btrfs_header_level(next
);
3688 path
->nodes
[*level
] = next
;
3689 path
->slots
[*level
] = 0;
3690 path
->locks
[*level
] = 1;
3694 parent
= path
->nodes
[*level
+ 1];
3695 bytenr
= path
->nodes
[*level
]->start
;
3696 blocksize
= path
->nodes
[*level
]->len
;
3698 ret
= btrfs_free_extent(trans
, root
, bytenr
, blocksize
,
3699 parent
->start
, btrfs_header_owner(parent
),
3700 btrfs_header_generation(parent
), *level
, 1);
3703 if (path
->locks
[*level
]) {
3704 btrfs_tree_unlock(path
->nodes
[*level
]);
3705 path
->locks
[*level
] = 0;
3707 free_extent_buffer(path
->nodes
[*level
]);
3708 path
->nodes
[*level
] = NULL
;
3715 * helper for dropping snapshots. This walks back up the tree in the path
3716 * to find the first node higher up where we haven't yet gone through
3719 static noinline
int walk_up_tree(struct btrfs_trans_handle
*trans
,
3720 struct btrfs_root
*root
,
3721 struct btrfs_path
*path
,
3722 int *level
, int max_level
)
3726 struct btrfs_root_item
*root_item
= &root
->root_item
;
3731 for (i
= *level
; i
< max_level
&& path
->nodes
[i
]; i
++) {
3732 slot
= path
->slots
[i
];
3733 if (slot
< btrfs_header_nritems(path
->nodes
[i
]) - 1) {
3734 struct extent_buffer
*node
;
3735 struct btrfs_disk_key disk_key
;
3738 * there is more work to do in this level.
3739 * Update the drop_progress marker to reflect
3740 * the work we've done so far, and then bump
3743 node
= path
->nodes
[i
];
3746 WARN_ON(*level
== 0);
3747 btrfs_node_key(node
, &disk_key
, path
->slots
[i
]);
3748 memcpy(&root_item
->drop_progress
,
3749 &disk_key
, sizeof(disk_key
));
3750 root_item
->drop_level
= i
;
3753 struct extent_buffer
*parent
;
3756 * this whole node is done, free our reference
3757 * on it and go up one level
3759 if (path
->nodes
[*level
] == root
->node
)
3760 parent
= path
->nodes
[*level
];
3762 parent
= path
->nodes
[*level
+ 1];
3764 root_owner
= btrfs_header_owner(parent
);
3765 root_gen
= btrfs_header_generation(parent
);
3767 clean_tree_block(trans
, root
, path
->nodes
[*level
]);
3768 ret
= btrfs_free_extent(trans
, root
,
3769 path
->nodes
[*level
]->start
,
3770 path
->nodes
[*level
]->len
,
3771 parent
->start
, root_owner
,
3772 root_gen
, *level
, 1);
3774 if (path
->locks
[*level
]) {
3775 btrfs_tree_unlock(path
->nodes
[*level
]);
3776 path
->locks
[*level
] = 0;
3778 free_extent_buffer(path
->nodes
[*level
]);
3779 path
->nodes
[*level
] = NULL
;
3787 * drop the reference count on the tree rooted at 'snap'. This traverses
3788 * the tree freeing any blocks that have a ref count of zero after being
3791 int btrfs_drop_snapshot(struct btrfs_trans_handle
*trans
, struct btrfs_root
3797 struct btrfs_path
*path
;
3801 struct btrfs_root_item
*root_item
= &root
->root_item
;
3803 WARN_ON(!mutex_is_locked(&root
->fs_info
->drop_mutex
));
3804 path
= btrfs_alloc_path();
3807 level
= btrfs_header_level(root
->node
);
3809 if (btrfs_disk_key_objectid(&root_item
->drop_progress
) == 0) {
3810 path
->nodes
[level
] = root
->node
;
3811 extent_buffer_get(root
->node
);
3812 path
->slots
[level
] = 0;
3814 struct btrfs_key key
;
3815 struct btrfs_disk_key found_key
;
3816 struct extent_buffer
*node
;
3818 btrfs_disk_key_to_cpu(&key
, &root_item
->drop_progress
);
3819 level
= root_item
->drop_level
;
3820 path
->lowest_level
= level
;
3821 wret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3826 node
= path
->nodes
[level
];
3827 btrfs_node_key(node
, &found_key
, path
->slots
[level
]);
3828 WARN_ON(memcmp(&found_key
, &root_item
->drop_progress
,
3829 sizeof(found_key
)));
3831 * unlock our path, this is safe because only this
3832 * function is allowed to delete this snapshot
3834 for (i
= 0; i
< BTRFS_MAX_LEVEL
; i
++) {
3835 if (path
->nodes
[i
] && path
->locks
[i
]) {
3837 btrfs_tree_unlock(path
->nodes
[i
]);
3842 unsigned long update
;
3843 wret
= walk_down_tree(trans
, root
, path
, &level
);
3849 wret
= walk_up_tree(trans
, root
, path
, &level
,
3855 if (trans
->transaction
->in_commit
||
3856 trans
->transaction
->delayed_refs
.flushing
) {
3860 atomic_inc(&root
->fs_info
->throttle_gen
);
3861 wake_up(&root
->fs_info
->transaction_throttle
);
3862 for (update_count
= 0; update_count
< 16; update_count
++) {
3863 update
= trans
->delayed_ref_updates
;
3864 trans
->delayed_ref_updates
= 0;
3866 btrfs_run_delayed_refs(trans
, root
, update
);
3871 for (i
= 0; i
<= orig_level
; i
++) {
3872 if (path
->nodes
[i
]) {
3873 free_extent_buffer(path
->nodes
[i
]);
3874 path
->nodes
[i
] = NULL
;
3878 btrfs_free_path(path
);
3882 int btrfs_drop_subtree(struct btrfs_trans_handle
*trans
,
3883 struct btrfs_root
*root
,
3884 struct extent_buffer
*node
,
3885 struct extent_buffer
*parent
)
3887 struct btrfs_path
*path
;
3893 path
= btrfs_alloc_path();
3896 btrfs_assert_tree_locked(parent
);
3897 parent_level
= btrfs_header_level(parent
);
3898 extent_buffer_get(parent
);
3899 path
->nodes
[parent_level
] = parent
;
3900 path
->slots
[parent_level
] = btrfs_header_nritems(parent
);
3902 btrfs_assert_tree_locked(node
);
3903 level
= btrfs_header_level(node
);
3904 extent_buffer_get(node
);
3905 path
->nodes
[level
] = node
;
3906 path
->slots
[level
] = 0;
3909 wret
= walk_down_subtree(trans
, root
, path
, &level
);
3915 wret
= walk_up_tree(trans
, root
, path
, &level
, parent_level
);
3922 btrfs_free_path(path
);
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}
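/*
 * pull every page of the range being relocated into the page cache,
 * mark it delalloc and dirty, and let ordinary writeback copy the data
 * to its new location
 */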
3932 static noinline
int relocate_inode_pages(struct inode
*inode
, u64 start
,
3937 unsigned long first_index
;
3938 unsigned long last_index
;
3941 struct extent_io_tree
*io_tree
= &BTRFS_I(inode
)->io_tree
;
3942 struct file_ra_state
*ra
;
3943 struct btrfs_ordered_extent
*ordered
;
3944 unsigned int total_read
= 0;
3945 unsigned int total_dirty
= 0;
3948 ra
= kzalloc(sizeof(*ra
), GFP_NOFS
);
3950 mutex_lock(&inode
->i_mutex
);
3951 first_index
= start
>> PAGE_CACHE_SHIFT
;
3952 last_index
= (start
+ len
- 1) >> PAGE_CACHE_SHIFT
;
	/* make sure the dirty trick played by the caller works */
3955 ret
= invalidate_inode_pages2_range(inode
->i_mapping
,
3956 first_index
, last_index
);
3960 file_ra_state_init(ra
, inode
->i_mapping
);
3962 for (i
= first_index
; i
<= last_index
; i
++) {
3963 if (total_read
% ra
->ra_pages
== 0) {
3964 btrfs_force_ra(inode
->i_mapping
, ra
, NULL
, i
,
3965 calc_ra(i
, last_index
, ra
->ra_pages
));
3969 if (((u64
)i
<< PAGE_CACHE_SHIFT
) > i_size_read(inode
))
3971 page
= grab_cache_page(inode
->i_mapping
, i
);
3976 if (!PageUptodate(page
)) {
3977 btrfs_readpage(NULL
, page
);
3979 if (!PageUptodate(page
)) {
3981 page_cache_release(page
);
3986 wait_on_page_writeback(page
);
3988 page_start
= (u64
)page
->index
<< PAGE_CACHE_SHIFT
;
3989 page_end
= page_start
+ PAGE_CACHE_SIZE
- 1;
3990 lock_extent(io_tree
, page_start
, page_end
, GFP_NOFS
);
3992 ordered
= btrfs_lookup_ordered_extent(inode
, page_start
);
3994 unlock_extent(io_tree
, page_start
, page_end
, GFP_NOFS
);
3996 page_cache_release(page
);
3997 btrfs_start_ordered_extent(inode
, ordered
, 1);
3998 btrfs_put_ordered_extent(ordered
);
4001 set_page_extent_mapped(page
);
4003 if (i
== first_index
)
4004 set_extent_bits(io_tree
, page_start
, page_end
,
4005 EXTENT_BOUNDARY
, GFP_NOFS
);
4006 btrfs_set_extent_delalloc(inode
, page_start
, page_end
);
4008 set_page_dirty(page
);
4011 unlock_extent(io_tree
, page_start
, page_end
, GFP_NOFS
);
4013 page_cache_release(page
);
4018 mutex_unlock(&inode
->i_mutex
);
4019 balance_dirty_pages_ratelimited_nr(inode
->i_mapping
, total_dirty
);
4023 static noinline
int relocate_data_extent(struct inode
*reloc_inode
,
4024 struct btrfs_key
*extent_key
,
4027 struct btrfs_root
*root
= BTRFS_I(reloc_inode
)->root
;
4028 struct extent_map_tree
*em_tree
= &BTRFS_I(reloc_inode
)->extent_tree
;
4029 struct extent_map
*em
;
4030 u64 start
= extent_key
->objectid
- offset
;
4031 u64 end
= start
+ extent_key
->offset
- 1;
4033 em
= alloc_extent_map(GFP_NOFS
);
4034 BUG_ON(!em
|| IS_ERR(em
));
4037 em
->len
= extent_key
->offset
;
4038 em
->block_len
= extent_key
->offset
;
4039 em
->block_start
= extent_key
->objectid
;
4040 em
->bdev
= root
->fs_info
->fs_devices
->latest_bdev
;
4041 set_bit(EXTENT_FLAG_PINNED
, &em
->flags
);
4043 /* setup extent map to cheat btrfs_readpage */
4044 lock_extent(&BTRFS_I(reloc_inode
)->io_tree
, start
, end
, GFP_NOFS
);
4047 spin_lock(&em_tree
->lock
);
4048 ret
= add_extent_mapping(em_tree
, em
);
4049 spin_unlock(&em_tree
->lock
);
4050 if (ret
!= -EEXIST
) {
4051 free_extent_map(em
);
4054 btrfs_drop_extent_cache(reloc_inode
, start
, end
, 0);
4056 unlock_extent(&BTRFS_I(reloc_inode
)->io_tree
, start
, end
, GFP_NOFS
);
4058 return relocate_inode_pages(reloc_inode
, start
, extent_key
->offset
);
4061 struct btrfs_ref_path
{
4063 u64 nodes
[BTRFS_MAX_LEVEL
];
4065 u64 root_generation
;
4072 struct btrfs_key node_keys
[BTRFS_MAX_LEVEL
];
4073 u64 new_nodes
[BTRFS_MAX_LEVEL
];
4076 struct disk_extent
{
static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}
4099 static noinline
int __next_ref_path(struct btrfs_trans_handle
*trans
,
4100 struct btrfs_root
*extent_root
,
4101 struct btrfs_ref_path
*ref_path
,
4104 struct extent_buffer
*leaf
;
4105 struct btrfs_path
*path
;
4106 struct btrfs_extent_ref
*ref
;
4107 struct btrfs_key key
;
4108 struct btrfs_key found_key
;
4114 path
= btrfs_alloc_path();
4119 ref_path
->lowest_level
= -1;
4120 ref_path
->current_level
= -1;
4121 ref_path
->shared_level
= -1;
4125 level
= ref_path
->current_level
- 1;
4126 while (level
>= -1) {
4128 if (level
< ref_path
->lowest_level
)
4132 bytenr
= ref_path
->nodes
[level
];
4134 bytenr
= ref_path
->extent_start
;
4135 BUG_ON(bytenr
== 0);
4137 parent
= ref_path
->nodes
[level
+ 1];
4138 ref_path
->nodes
[level
+ 1] = 0;
4139 ref_path
->current_level
= level
;
4140 BUG_ON(parent
== 0);
4142 key
.objectid
= bytenr
;
4143 key
.offset
= parent
+ 1;
4144 key
.type
= BTRFS_EXTENT_REF_KEY
;
4146 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
, 0, 0);
4151 leaf
= path
->nodes
[0];
4152 nritems
= btrfs_header_nritems(leaf
);
4153 if (path
->slots
[0] >= nritems
) {
4154 ret
= btrfs_next_leaf(extent_root
, path
);
4159 leaf
= path
->nodes
[0];
4162 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4163 if (found_key
.objectid
== bytenr
&&
4164 found_key
.type
== BTRFS_EXTENT_REF_KEY
) {
4165 if (level
< ref_path
->shared_level
)
4166 ref_path
->shared_level
= level
;
4171 btrfs_release_path(extent_root
, path
);
4174 /* reached lowest level */
4178 level
= ref_path
->current_level
;
4179 while (level
< BTRFS_MAX_LEVEL
- 1) {
4183 bytenr
= ref_path
->nodes
[level
];
4185 bytenr
= ref_path
->extent_start
;
4187 BUG_ON(bytenr
== 0);
4189 key
.objectid
= bytenr
;
4191 key
.type
= BTRFS_EXTENT_REF_KEY
;
4193 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
, 0, 0);
4197 leaf
= path
->nodes
[0];
4198 nritems
= btrfs_header_nritems(leaf
);
4199 if (path
->slots
[0] >= nritems
) {
4200 ret
= btrfs_next_leaf(extent_root
, path
);
4204 /* the extent was freed by someone */
4205 if (ref_path
->lowest_level
== level
)
4207 btrfs_release_path(extent_root
, path
);
4210 leaf
= path
->nodes
[0];
4213 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4214 if (found_key
.objectid
!= bytenr
||
4215 found_key
.type
!= BTRFS_EXTENT_REF_KEY
) {
4216 /* the extent was freed by someone */
4217 if (ref_path
->lowest_level
== level
) {
4221 btrfs_release_path(extent_root
, path
);
4225 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
4226 struct btrfs_extent_ref
);
4227 ref_objectid
= btrfs_ref_objectid(leaf
, ref
);
4228 if (ref_objectid
< BTRFS_FIRST_FREE_OBJECTID
) {
4230 level
= (int)ref_objectid
;
4231 BUG_ON(level
>= BTRFS_MAX_LEVEL
);
4232 ref_path
->lowest_level
= level
;
4233 ref_path
->current_level
= level
;
4234 ref_path
->nodes
[level
] = bytenr
;
4236 WARN_ON(ref_objectid
!= level
);
4239 WARN_ON(level
!= -1);
4243 if (ref_path
->lowest_level
== level
) {
4244 ref_path
->owner_objectid
= ref_objectid
;
4245 ref_path
->num_refs
= btrfs_ref_num_refs(leaf
, ref
);
4249 * the block is tree root or the block isn't in reference
4252 if (found_key
.objectid
== found_key
.offset
||
4253 is_cowonly_root(btrfs_ref_root(leaf
, ref
))) {
4254 ref_path
->root_objectid
= btrfs_ref_root(leaf
, ref
);
4255 ref_path
->root_generation
=
4256 btrfs_ref_generation(leaf
, ref
);
4258 /* special reference from the tree log */
4259 ref_path
->nodes
[0] = found_key
.offset
;
4260 ref_path
->current_level
= 0;
4267 BUG_ON(ref_path
->nodes
[level
] != 0);
4268 ref_path
->nodes
[level
] = found_key
.offset
;
4269 ref_path
->current_level
= level
;
4272 * the reference was created in the running transaction,
4273 * no need to continue walking up.
4275 if (btrfs_ref_generation(leaf
, ref
) == trans
->transid
) {
4276 ref_path
->root_objectid
= btrfs_ref_root(leaf
, ref
);
4277 ref_path
->root_generation
=
4278 btrfs_ref_generation(leaf
, ref
);
4283 btrfs_release_path(extent_root
, path
);
4286 /* reached max tree level, but no tree root found. */
4289 btrfs_free_path(path
);
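/*
 * __next_ref_path does the real backref walking; these wrappers start
 * a walk from a given extent and step an existing walk forward
 */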
static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct btrfs_ref_path *ref_path,
				u64 extent_start)
{
	memset(ref_path, 0, sizeof(*ref_path));
	ref_path->extent_start = extent_start;

	return __next_ref_path(trans, extent_root, ref_path, 1);
}

static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct btrfs_ref_path *ref_path)
{
	return __next_ref_path(trans, extent_root, ref_path, 0);
}
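/*
 * look up the file extents the relocation inode already holds for this
 * extent, so the old references can be rewritten to point at the new
 * copies
 */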
4311 static noinline
int get_new_locations(struct inode
*reloc_inode
,
4312 struct btrfs_key
*extent_key
,
4313 u64 offset
, int no_fragment
,
4314 struct disk_extent
**extents
,
4317 struct btrfs_root
*root
= BTRFS_I(reloc_inode
)->root
;
4318 struct btrfs_path
*path
;
4319 struct btrfs_file_extent_item
*fi
;
4320 struct extent_buffer
*leaf
;
4321 struct disk_extent
*exts
= *extents
;
4322 struct btrfs_key found_key
;
4327 int max
= *nr_extents
;
4330 WARN_ON(!no_fragment
&& *extents
);
4333 exts
= kmalloc(sizeof(*exts
) * max
, GFP_NOFS
);
4338 path
= btrfs_alloc_path();
4341 cur_pos
= extent_key
->objectid
- offset
;
4342 last_byte
= extent_key
->objectid
+ extent_key
->offset
;
4343 ret
= btrfs_lookup_file_extent(NULL
, root
, path
, reloc_inode
->i_ino
,
4353 leaf
= path
->nodes
[0];
4354 nritems
= btrfs_header_nritems(leaf
);
4355 if (path
->slots
[0] >= nritems
) {
4356 ret
= btrfs_next_leaf(root
, path
);
4361 leaf
= path
->nodes
[0];
4364 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4365 if (found_key
.offset
!= cur_pos
||
4366 found_key
.type
!= BTRFS_EXTENT_DATA_KEY
||
4367 found_key
.objectid
!= reloc_inode
->i_ino
)
4370 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4371 struct btrfs_file_extent_item
);
4372 if (btrfs_file_extent_type(leaf
, fi
) !=
4373 BTRFS_FILE_EXTENT_REG
||
4374 btrfs_file_extent_disk_bytenr(leaf
, fi
) == 0)
4378 struct disk_extent
*old
= exts
;
4380 exts
= kzalloc(sizeof(*exts
) * max
, GFP_NOFS
);
4381 memcpy(exts
, old
, sizeof(*exts
) * nr
);
4382 if (old
!= *extents
)
4386 exts
[nr
].disk_bytenr
=
4387 btrfs_file_extent_disk_bytenr(leaf
, fi
);
4388 exts
[nr
].disk_num_bytes
=
4389 btrfs_file_extent_disk_num_bytes(leaf
, fi
);
4390 exts
[nr
].offset
= btrfs_file_extent_offset(leaf
, fi
);
4391 exts
[nr
].num_bytes
= btrfs_file_extent_num_bytes(leaf
, fi
);
4392 exts
[nr
].ram_bytes
= btrfs_file_extent_ram_bytes(leaf
, fi
);
4393 exts
[nr
].compression
= btrfs_file_extent_compression(leaf
, fi
);
4394 exts
[nr
].encryption
= btrfs_file_extent_encryption(leaf
, fi
);
4395 exts
[nr
].other_encoding
= btrfs_file_extent_other_encoding(leaf
,
4397 BUG_ON(exts
[nr
].offset
> 0);
4398 BUG_ON(exts
[nr
].compression
|| exts
[nr
].encryption
);
4399 BUG_ON(exts
[nr
].num_bytes
!= exts
[nr
].disk_num_bytes
);
4401 cur_pos
+= exts
[nr
].num_bytes
;
4404 if (cur_pos
+ offset
>= last_byte
)
4414 BUG_ON(cur_pos
+ offset
> last_byte
);
4415 if (cur_pos
+ offset
< last_byte
) {
4421 btrfs_free_path(path
);
4423 if (exts
!= *extents
)
4432 static noinline
int replace_one_extent(struct btrfs_trans_handle
*trans
,
4433 struct btrfs_root
*root
,
4434 struct btrfs_path
*path
,
4435 struct btrfs_key
*extent_key
,
4436 struct btrfs_key
*leaf_key
,
4437 struct btrfs_ref_path
*ref_path
,
4438 struct disk_extent
*new_extents
,
4441 struct extent_buffer
*leaf
;
4442 struct btrfs_file_extent_item
*fi
;
4443 struct inode
*inode
= NULL
;
4444 struct btrfs_key key
;
4449 u64 search_end
= (u64
)-1;
4452 int extent_locked
= 0;
4456 memcpy(&key
, leaf_key
, sizeof(key
));
4457 if (ref_path
->owner_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
) {
4458 if (key
.objectid
< ref_path
->owner_objectid
||
4459 (key
.objectid
== ref_path
->owner_objectid
&&
4460 key
.type
< BTRFS_EXTENT_DATA_KEY
)) {
4461 key
.objectid
= ref_path
->owner_objectid
;
4462 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4468 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4472 leaf
= path
->nodes
[0];
4473 nritems
= btrfs_header_nritems(leaf
);
4475 if (extent_locked
&& ret
> 0) {
4477 * the file extent item was modified by someone
4478 * before the extent got locked.
4480 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4481 lock_end
, GFP_NOFS
);
4485 if (path
->slots
[0] >= nritems
) {
4486 if (++nr_scaned
> 2)
4489 BUG_ON(extent_locked
);
4490 ret
= btrfs_next_leaf(root
, path
);
4495 leaf
= path
->nodes
[0];
4496 nritems
= btrfs_header_nritems(leaf
);
4499 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4501 if (ref_path
->owner_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
) {
4502 if ((key
.objectid
> ref_path
->owner_objectid
) ||
4503 (key
.objectid
== ref_path
->owner_objectid
&&
4504 key
.type
> BTRFS_EXTENT_DATA_KEY
) ||
4505 key
.offset
>= search_end
)
4509 if (inode
&& key
.objectid
!= inode
->i_ino
) {
4510 BUG_ON(extent_locked
);
4511 btrfs_release_path(root
, path
);
4512 mutex_unlock(&inode
->i_mutex
);
4518 if (key
.type
!= BTRFS_EXTENT_DATA_KEY
) {
4523 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4524 struct btrfs_file_extent_item
);
4525 extent_type
= btrfs_file_extent_type(leaf
, fi
);
4526 if ((extent_type
!= BTRFS_FILE_EXTENT_REG
&&
4527 extent_type
!= BTRFS_FILE_EXTENT_PREALLOC
) ||
4528 (btrfs_file_extent_disk_bytenr(leaf
, fi
) !=
4529 extent_key
->objectid
)) {
4535 num_bytes
= btrfs_file_extent_num_bytes(leaf
, fi
);
4536 ext_offset
= btrfs_file_extent_offset(leaf
, fi
);
4538 if (search_end
== (u64
)-1) {
4539 search_end
= key
.offset
- ext_offset
+
4540 btrfs_file_extent_ram_bytes(leaf
, fi
);
4543 if (!extent_locked
) {
4544 lock_start
= key
.offset
;
4545 lock_end
= lock_start
+ num_bytes
- 1;
4547 if (lock_start
> key
.offset
||
4548 lock_end
+ 1 < key
.offset
+ num_bytes
) {
4549 unlock_extent(&BTRFS_I(inode
)->io_tree
,
4550 lock_start
, lock_end
, GFP_NOFS
);
4556 btrfs_release_path(root
, path
);
4558 inode
= btrfs_iget_locked(root
->fs_info
->sb
,
4559 key
.objectid
, root
);
4560 if (inode
->i_state
& I_NEW
) {
4561 BTRFS_I(inode
)->root
= root
;
4562 BTRFS_I(inode
)->location
.objectid
=
4564 BTRFS_I(inode
)->location
.type
=
4565 BTRFS_INODE_ITEM_KEY
;
4566 BTRFS_I(inode
)->location
.offset
= 0;
4567 btrfs_read_locked_inode(inode
);
4568 unlock_new_inode(inode
);
4571 * some code call btrfs_commit_transaction while
4572 * holding the i_mutex, so we can't use mutex_lock
4575 if (is_bad_inode(inode
) ||
4576 !mutex_trylock(&inode
->i_mutex
)) {
4579 key
.offset
= (u64
)-1;
4584 if (!extent_locked
) {
4585 struct btrfs_ordered_extent
*ordered
;
4587 btrfs_release_path(root
, path
);
4589 lock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4590 lock_end
, GFP_NOFS
);
4591 ordered
= btrfs_lookup_first_ordered_extent(inode
,
4594 ordered
->file_offset
<= lock_end
&&
4595 ordered
->file_offset
+ ordered
->len
> lock_start
) {
4596 unlock_extent(&BTRFS_I(inode
)->io_tree
,
4597 lock_start
, lock_end
, GFP_NOFS
);
4598 btrfs_start_ordered_extent(inode
, ordered
, 1);
4599 btrfs_put_ordered_extent(ordered
);
4600 key
.offset
+= num_bytes
;
4604 btrfs_put_ordered_extent(ordered
);
4610 if (nr_extents
== 1) {
4611 /* update extent pointer in place */
4612 btrfs_set_file_extent_disk_bytenr(leaf
, fi
,
4613 new_extents
[0].disk_bytenr
);
4614 btrfs_set_file_extent_disk_num_bytes(leaf
, fi
,
4615 new_extents
[0].disk_num_bytes
);
4616 btrfs_mark_buffer_dirty(leaf
);
4618 btrfs_drop_extent_cache(inode
, key
.offset
,
4619 key
.offset
+ num_bytes
- 1, 0);
4621 ret
= btrfs_inc_extent_ref(trans
, root
,
4622 new_extents
[0].disk_bytenr
,
4623 new_extents
[0].disk_num_bytes
,
4625 root
->root_key
.objectid
,
4630 ret
= btrfs_free_extent(trans
, root
,
4631 extent_key
->objectid
,
4634 btrfs_header_owner(leaf
),
4635 btrfs_header_generation(leaf
),
4639 btrfs_release_path(root
, path
);
4640 key
.offset
+= num_bytes
;
 * drop the old extent pointer first, then insert the
 * new pointers one by one
4651 btrfs_release_path(root
, path
);
4652 ret
= btrfs_drop_extents(trans
, root
, inode
, key
.offset
,
4653 key
.offset
+ num_bytes
,
4654 key
.offset
, &alloc_hint
);
4657 for (i
= 0; i
< nr_extents
; i
++) {
4658 if (ext_offset
>= new_extents
[i
].num_bytes
) {
4659 ext_offset
-= new_extents
[i
].num_bytes
;
4662 extent_len
= min(new_extents
[i
].num_bytes
-
4663 ext_offset
, num_bytes
);
4665 ret
= btrfs_insert_empty_item(trans
, root
,
4670 leaf
= path
->nodes
[0];
4671 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4672 struct btrfs_file_extent_item
);
4673 btrfs_set_file_extent_generation(leaf
, fi
,
4675 btrfs_set_file_extent_type(leaf
, fi
,
4676 BTRFS_FILE_EXTENT_REG
);
4677 btrfs_set_file_extent_disk_bytenr(leaf
, fi
,
4678 new_extents
[i
].disk_bytenr
);
4679 btrfs_set_file_extent_disk_num_bytes(leaf
, fi
,
4680 new_extents
[i
].disk_num_bytes
);
4681 btrfs_set_file_extent_ram_bytes(leaf
, fi
,
4682 new_extents
[i
].ram_bytes
);
4684 btrfs_set_file_extent_compression(leaf
, fi
,
4685 new_extents
[i
].compression
);
4686 btrfs_set_file_extent_encryption(leaf
, fi
,
4687 new_extents
[i
].encryption
);
4688 btrfs_set_file_extent_other_encoding(leaf
, fi
,
4689 new_extents
[i
].other_encoding
);
4691 btrfs_set_file_extent_num_bytes(leaf
, fi
,
4693 ext_offset
+= new_extents
[i
].offset
;
4694 btrfs_set_file_extent_offset(leaf
, fi
,
4696 btrfs_mark_buffer_dirty(leaf
);
4698 btrfs_drop_extent_cache(inode
, key
.offset
,
4699 key
.offset
+ extent_len
- 1, 0);
4701 ret
= btrfs_inc_extent_ref(trans
, root
,
4702 new_extents
[i
].disk_bytenr
,
4703 new_extents
[i
].disk_num_bytes
,
4705 root
->root_key
.objectid
,
4706 trans
->transid
, key
.objectid
);
4708 btrfs_release_path(root
, path
);
4710 inode_add_bytes(inode
, extent_len
);
4713 num_bytes
-= extent_len
;
4714 key
.offset
+= extent_len
;
4719 BUG_ON(i
>= nr_extents
);
4723 if (extent_locked
) {
4724 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4725 lock_end
, GFP_NOFS
);
4729 if (ref_path
->owner_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
&&
4730 key
.offset
>= search_end
)
4737 btrfs_release_path(root
, path
);
4739 mutex_unlock(&inode
->i_mutex
);
4740 if (extent_locked
) {
4741 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4742 lock_end
, GFP_NOFS
);
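/*
 * copy the cached leaf ref of the original block over to its relocated
 * copy so the refs it holds can still be dropped without reading the
 * leaf from disk
 */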
4749 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle
*trans
,
4750 struct btrfs_root
*root
,
4751 struct extent_buffer
*buf
, u64 orig_start
)
4756 BUG_ON(btrfs_header_generation(buf
) != trans
->transid
);
4757 BUG_ON(root
->root_key
.objectid
!= BTRFS_TREE_RELOC_OBJECTID
);
4759 level
= btrfs_header_level(buf
);
4761 struct btrfs_leaf_ref
*ref
;
4762 struct btrfs_leaf_ref
*orig_ref
;
4764 orig_ref
= btrfs_lookup_leaf_ref(root
, orig_start
);
4768 ref
= btrfs_alloc_leaf_ref(root
, orig_ref
->nritems
);
4770 btrfs_free_leaf_ref(root
, orig_ref
);
4774 ref
->nritems
= orig_ref
->nritems
;
4775 memcpy(ref
->extents
, orig_ref
->extents
,
4776 sizeof(ref
->extents
[0]) * ref
->nritems
);
4778 btrfs_free_leaf_ref(root
, orig_ref
);
4780 ref
->root_gen
= trans
->transid
;
4781 ref
->bytenr
= buf
->start
;
4782 ref
->owner
= btrfs_header_owner(buf
);
4783 ref
->generation
= btrfs_header_generation(buf
);
4785 ret
= btrfs_add_leaf_ref(root
, ref
, 0);
4787 btrfs_free_leaf_ref(root
, ref
);
4792 static noinline
int invalidate_extent_cache(struct btrfs_root
*root
,
4793 struct extent_buffer
*leaf
,
4794 struct btrfs_block_group_cache
*group
,
4795 struct btrfs_root
*target_root
)
4797 struct btrfs_key key
;
4798 struct inode
*inode
= NULL
;
4799 struct btrfs_file_extent_item
*fi
;
4801 u64 skip_objectid
= 0;
4805 nritems
= btrfs_header_nritems(leaf
);
4806 for (i
= 0; i
< nritems
; i
++) {
4807 btrfs_item_key_to_cpu(leaf
, &key
, i
);
4808 if (key
.objectid
== skip_objectid
||
4809 key
.type
!= BTRFS_EXTENT_DATA_KEY
)
4811 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
4812 if (btrfs_file_extent_type(leaf
, fi
) ==
4813 BTRFS_FILE_EXTENT_INLINE
)
4815 if (btrfs_file_extent_disk_bytenr(leaf
, fi
) == 0)
4817 if (!inode
|| inode
->i_ino
!= key
.objectid
) {
4819 inode
= btrfs_ilookup(target_root
->fs_info
->sb
,
4820 key
.objectid
, target_root
, 1);
4823 skip_objectid
= key
.objectid
;
4826 num_bytes
= btrfs_file_extent_num_bytes(leaf
, fi
);
4828 lock_extent(&BTRFS_I(inode
)->io_tree
, key
.offset
,
4829 key
.offset
+ num_bytes
- 1, GFP_NOFS
);
4830 btrfs_drop_extent_cache(inode
, key
.offset
,
4831 key
.offset
+ num_bytes
- 1, 1);
4832 unlock_extent(&BTRFS_I(inode
)->io_tree
, key
.offset
,
4833 key
.offset
+ num_bytes
- 1, GFP_NOFS
);
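/*
 * Rewrite the file extent items in a leaf of the reloc tree so that they
 * point at the new copies of the data extents, updating the cached leaf
 * refs and the extent reference counts along the way.
 */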
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode)
{
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;
	u64 bytenr;
	u64 num_bytes;
	u32 nritems;
	u32 i;
	int ext_index;
	int nr_extent;
	int ret;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);
	BUG_ON(!ref);

	ext_index = -1;
	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;

		ext_index++;
		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)
			continue;

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		nr_extent = 1;
		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);
		if (ret > 0)
			continue;
		BUG_ON(ret < 0);

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					new_extent->disk_bytenr,
					new_extent->disk_num_bytes,
					leaf->start,
					root->root_key.objectid,
					trans->transid, key.objectid);
		BUG_ON(ret);
		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),
					key.objectid, 0);
		BUG_ON(ret);
		cond_resched();
	}
	kfree(new_extent);
	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
	return 0;
}
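/*
 * Detach the reloc tree from a subvolume root and queue it on the
 * fs_info dead_reloc_roots list; the tree itself is dropped later by
 * btrfs_drop_dead_reloc_roots().
 */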
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
			sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

		ret = btrfs_update_root(trans, root->fs_info->tree_root,
					&reloc_root->root_key,
					&reloc_root->root_item);
		BUG_ON(ret);
	}
	return 0;
}
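/*
 * Drop every reloc tree queued on fs_info->dead_reloc_roots and delete
 * their root items from the tree of tree roots.
 */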
int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;
	int ret;
	unsigned long nr;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);
		while (1) {
			trans = btrfs_join_transaction(root, 1);
			BUG_ON(!trans);

			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, reloc_root);
			if (ret != -EAGAIN)
				break;
			mutex_unlock(&root->fs_info->drop_mutex);

			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, root);
			BUG_ON(ret);
			btrfs_btree_balance_dirty(root, nr);
		}

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
		btrfs_btree_balance_dirty(root, nr);

		kfree(prev_root);
		prev_root = reloc_root;
	}
	if (prev_root) {
		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
		kfree(prev_root);
	}
	return 0;
}
int btrfs_add_dead_reloc_root(struct btrfs_root *root)
{
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
	return 0;
}
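/*
 * Cleanup pass, typically run at mount time: find reloc trees left over
 * from an interrupted balance, commit a transaction if any were queued
 * for dropping, and run orphan cleanup on the data relocation tree.
 */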
int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;
	int found;
	int ret;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
	BUG_ON(ret);
	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	if (found) {
		trans = btrfs_start_transaction(root, 1);
		BUG_ON(!trans);
		ret = btrfs_commit_transaction(trans, root);
		BUG_ON(ret);
	}

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
	return 0;
}
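/*
 * Create the reloc tree for a subvolume if it does not already have one:
 * snapshot the subvol's committed root under BTRFS_TREE_RELOC_OBJECTID
 * and hook it up via root->reloc_root.
 */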
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)
		return 0;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(ret);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
	return 0;
}
/*
 * Core function of space balance.
 *
 * The idea is to use reloc trees to relocate tree blocks in reference
 * counted roots. There is one reloc tree for each subvol, and all
 * reloc trees share the same root key objectid. Reloc trees are snapshots
 * of the latest committed roots of subvols (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps.
 * COW the block through the subvol's reloc tree, then update the block
 * pointer in the subvol to point to the new block. Since all reloc trees
 * share the same root key objectid, doing special handling for tree blocks
 * owned by them is easy. Once a tree block has been COWed in one reloc
 * tree, we can use the resulting new block directly when the same block
 * is required to COW again through other reloc trees. In this way,
 * relocated tree blocks are shared between reloc trees, so they are also
 * shared between subvols.
 */
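/*
 * Roughly, the relocation pass implemented below is organized as:
 *
 *   btrfs_relocate_block_group()
 *     -> relocate_one_extent()        for each extent item in the group
 *          -> relocate_data_extent()  pass 0: copy data into the reloc inode
 *          -> relocate_one_path()     update references through the owner's
 *                                     reloc tree; btrfs_merge_path() then
 *                                     swaps the relocated blocks into place
 *          -> replace_one_extent()    fallback for the remaining references
 *          -> relocate_tree_block()   for references owned by metadata trees
 */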
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;
	u64 *nodes;
	int level;
	int shared_level;
	int lowest_level = 0;
	int ret;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
		BUG_ON(ret < 0);
		path->lowest_level = 0;
		btrfs_release_path(root, path);
		return 0;
	}

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);
	BUG_ON(ret);
	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)
				break;
			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);
		}
		if (nodes[0] &&
		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);
			BUG_ON(ret);
		}
		btrfs_release_path(reloc_root, path);
	} else {
		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
				       lowest_level);
		BUG_ON(ret);
	}

	/*
	 * replace tree blocks in the fs tree with tree blocks in
	 * the reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
	BUG_ON(ret);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 0);
		BUG_ON(ret);
		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);
		BUG_ON(ret);
		free_extent_buffer(eb);
	}

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
	return 0;
}
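/*
 * Relocate a tree block reference; the real work is done by
 * relocate_one_path(), no data extent replacement is needed here.
 */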
static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
{
	int ret;

	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);
	BUG_ON(ret);

	return 0;
}
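/*
 * Delete the extent item whose objectid is zero; relocate_one_extent()
 * treats that key as a special case and simply removes it.
 */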
static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;

	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_release_path(extent_root, path);
	return ret;
}
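/*
 * Read the root a reference path starts from.  COW-only trees use root
 * key offset 0, subvolume roots are looked up with offset (u64)-1.
 */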
static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
					struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
}
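/*
 * Relocate all references to a single extent item.  Walks every
 * reference path of the extent and, depending on the pass and on who
 * owns the reference, either copies the data, updates the references
 * through the owner's reloc tree, or falls back to rewriting the file
 * extent items directly.
 */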
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;
	int nr_extents = 0;
	int loops;
	int ret;
	int level;
	struct btrfs_key first_key;
	u64 prev_block = 0;

	trans = btrfs_start_transaction(extent_root, 1);
	BUG_ON(!trans);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);
		goto out;
	}

	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
	if (!ref_path) {
		ret = -ENOMEM;
		goto out;
	}

	for (loops = 0; ; loops++) {
		if (loops == 0) {
			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);
		} else {
			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
		}
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
			continue;

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted tree, only process reference paths
		 * rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)
			continue;

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			if (pass == 0) {
				/*
				 * copy data extents to new locations
				 */
				u64 group_start = group->key.objectid;
				ret = relocate_data_extent(reloc_inode,
							   extent_key,
							   group_start);
				if (ret < 0)
					goto out;
				break;
			}
			level = 0;
		} else {
			level = ref_path->owner_objectid;
		}

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,
					     block_size, 0);
			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;
		}

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			if (pass == 1) {
				ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);
				if (ret < 0)
					goto out;
				continue;
			}
			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			if (!new_extents) {
				u64 group_start = group->key.objectid;
				new_extents = kmalloc(sizeof(*new_extents),
						      GFP_NOFS);
				nr_extents = 1;
				ret = get_new_locations(reloc_inode,
							extent_key,
							group_start, 1,
							&new_extents,
							&nr_extents);
				if (ret)
					goto out;
			}
			ret = replace_one_extent(trans, found_root,
						path, extent_key,
						&first_key, ref_path,
						new_extents, nr_extents);
		} else {
			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, extent_root);
	kfree(new_extents);
	kfree(ref_path);
	return ret;
}
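/*
 * Pick the allocation profile for chunks allocated while a block group
 * is being emptied: striped/mirrored profiles are degraded when only one
 * writable device is left, and DUP/single chunks are converted back to
 * RAID when more devices are available.
 */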
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->rw_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}
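/*
 * Make sure a chunk with the right profile exists to receive the data
 * that is about to be moved out of shrink_block_group.
 */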
static int __alloc_chunk_for_shrink(struct btrfs_root *root,
			struct btrfs_block_group_cache *shrink_block_group,
			int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
		spin_unlock(&shrink_block_group->lock);

		trans = btrfs_start_transaction(root, 1);
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		spin_unlock(&shrink_block_group->lock);

		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		btrfs_end_transaction(trans, root);
	} else
		spin_unlock(&shrink_block_group->lock);
	return 0;
}
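/*
 * Insert a plain in-tree inode item (regular file, no compression) that
 * create_reloc_inode() turns into the relocation staging inode.
 */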
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 u64 objectid, u64 size)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, size);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
out:
	btrfs_free_path(path);
	return ret;
}
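/*
 * Create the temporary inode in the data relocation tree that data
 * extents are copied into while a block group is being relocated.
 * index_cnt remembers the start of the block group so file offsets can
 * be mapped back to logical disk addresses.
 */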
static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
	int err = 0;

	root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
	BUG_ON(err);

	err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
				       group->key.offset, 0, group->key.offset,
				       0, 0, 0);
	BUG_ON(err);

	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		BTRFS_I(inode)->location.objectid = objectid;
		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
		BTRFS_I(inode)->location.offset = 0;
		btrfs_read_locked_inode(inode);
		unlock_new_inode(inode);
		BUG_ON(is_bad_inode(inode));
	} else {
		BUG_ON(1);
	}
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, inode);
out:
	btrfs_end_transaction(trans, root);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}
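/*
 * After data has been written through the relocation inode, copy the
 * checksums that were computed for the old disk location over to the
 * ordered extent covering the new location.
 */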
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_sector_sum *sector_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct list_head list;
	size_t offset;
	int ret;
	u64 disk_bytenr;

	INIT_LIST_HEAD(&list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list);

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		sector_sum = sums->sums;
		sums->bytenr = ordered->start;

		offset = 0;
		while (offset < sums->len) {
			sector_sum->bytenr += ordered->start - disk_bytenr;
			sector_sum++;
			offset += root->sectorsize;
		}

		btrfs_add_ordered_sum(inode, ordered, sums);
	}

	btrfs_put_ordered_extent(ordered);
	return 0;
}
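/*
 * Relocate every extent in the given block group.  The group is marked
 * read-only, a relocation inode is created to stage data, and the extent
 * tree range is scanned repeatedly (in several passes) until no extent in
 * the range is left referenced.
 */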
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct extent_buffer *leaf;
	struct inode *reloc_inode;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_key key;
	u64 skipped;
	u64 cur_byte;
	u64 total_found;
	u32 nritems;
	int ret;
	int progress;
	int pass = 0;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(info, group_start);
	BUG_ON(!block_group);

	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)block_group->key.objectid,
	       (unsigned long long)block_group->flags);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	reloc_inode = create_reloc_inode(info, block_group);
	BUG_ON(IS_ERR(reloc_inode));

	__alloc_chunk_for_shrink(root, block_group, 1);
	set_block_group_readonly(block_group);

	btrfs_start_delalloc_inodes(info->tree_root);
	btrfs_wait_ordered_extents(info->tree_root, 0);
again:
	skipped = 0;
	total_found = 0;
	progress = 0;
	key.objectid = block_group->key.objectid;
	key.offset = 0;
	key.type = 0;
	cur_byte = key.objectid;

	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_clean_old_snapshots(info->tree_root);
	btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
next:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (progress && need_resched()) {
			btrfs_release_path(root, path);
			cond_resched();
			progress = 0;
			continue;
		}
		progress = 1;

		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
		    key.objectid + key.offset <= cur_byte) {
			path->slots[0]++;
			goto next;
		}

		total_found++;
		cur_byte = key.objectid + key.offset;
		btrfs_release_path(root, path);

		__alloc_chunk_for_shrink(root, block_group, 0);
		ret = relocate_one_extent(root, path, &key, block_group,
					  reloc_inode, pass);
		BUG_ON(ret < 0);
		if (ret > 0)
			skipped++;

		key.objectid = cur_byte;
		key.type = 0;
		key.offset = 0;
	}

	btrfs_release_path(root, path);

	if (pass == 0) {
		btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
		invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
	}

	if (total_found > 0) {
		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
		       (unsigned long long)total_found, pass);
		pass++;
		if (total_found == skipped && pass > 2) {
			iput(reloc_inode);
			reloc_inode = create_reloc_inode(info, block_group);
			pass = 0;
		}
		goto again;
	}

	/* delete reloc_inode */
	iput(reloc_inode);

	/* unpin extents in this range */
	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	spin_lock(&block_group->lock);
	WARN_ON(block_group->pinned > 0);
	WARN_ON(block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
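/*
 * Find the first block group item at or after *key in the extent tree.
 */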
static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
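/*
 * Called during the final stages of unmount: tear down all cached block
 * groups and the space_info structures that describe them.
 */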
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct rb_node *n;

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		btrfs_remove_free_space_cache(block_group);
		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		WARN_ON(atomic_read(&block_group->count) != 1);
		kfree(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
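/*
 * Read every block group item from the extent tree at mount time and
 * build the in-memory block group cache and space_info accounting.
 */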
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		mutex_init(&cache->cache_mutex);
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		down_write(&space_info->groups_sem);
		list_add_tail(&cache->list, &space_info->block_groups);
		up_write(&space_info->groups_sem);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_readonly(cache);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
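/*
 * Create a new block group for a freshly allocated chunk and insert its
 * item into the extent tree.
 */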
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	mutex_init(&cache->cache_mutex);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);
	down_write(&cache->space_info->groups_sem);
	list_add_tail(&cache->list, &cache->space_info->block_groups);
	up_write(&cache->space_info->groups_sem);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
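/*
 * Remove an empty, read-only block group: unhook it from the in-memory
 * caches, fix up the space_info accounting and delete its item from the
 * extent tree.
 */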
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_key key;
	int ret;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));

	path = btrfs_alloc_path();
	BUG_ON(!path);

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);
	btrfs_remove_free_space_cache(block_group);
	down_write(&block_group->space_info->groups_sem);
	list_del(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);
	block_group->space_info->full = 0;

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}