/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include "print-tree.h"
#include "transaction.h"
#include "ref-cache.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2
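/*
 * Pending extent operations are queued on the fs_info->extent_ins io tree:
 * the byte range of each op is marked EXTENT_WRITEBACK and the
 * pending_extent_op pointer is stashed in the extent state's private field
 * (see the set_extent_bits()/set_state_private() calls further down).  The
 * queue is drained by finish_current_insert() and del_pending_extents().
 */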
struct pending_extent_op {
	int type;
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 orig_parent;
	u64 generation;
	u64 orig_generation;
	int level;
	struct list_head list;
	int del;
};
static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root, int all);
static int del_pending_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, int all);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, u64 num_bytes, int is_data);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static int add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size;
	int ret;

	mutex_lock(&info->pinned_mutex);
	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}
	mutex_unlock(&info->pinned_mutex);

	return 0;
}
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr, 0,
				       &logical, &nr, &stripe_len);
		BUG_ON(ret);
		while (nr--) {
			btrfs_remove_free_space(cache, logical[nr],
						stripe_len);
		}
		kfree(logical);
	}
	return 0;
}
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	u64 last;

	if (!block_group)
		return 0;

	root = root->fs_info->extent_root;

	if (block_group->cached)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
	key.objectid = last;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			if (ret == 0)
				continue;
			else
				break;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)
			goto next;

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			add_new_free_space(block_group, root->fs_info, last,
					   key.objectid);

			last = key.objectid + key.offset;
		}
next:
		path->slots[0]++;
	}

	add_new_free_space(block_group, root->fs_info, last,
			   block_group->key.objectid +
			   block_group->key.offset);

	remove_sb_from_cache(root, block_group);
	block_group->cached = 1;
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}
/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}
static inline void put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list) {
		if (found->flags == flags)
			return found;
	}
	return NULL;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
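/*
 * Quick example of the helper above: div_factor(1000, 6) == 600, so a
 * caller is asking for factor/10 (here ~60%) of a byte count, typically
 * to use as a fullness threshold for a block group or space_info.
 */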
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - number of references held by the parent node (always 1 for tree blocks)
 *
 * A btree leaf may hold multiple references to a file extent.  In most
 * cases, these references are from the same file and the corresponding
 * offsets inside the file are close together.
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, 1)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks similar to the create case, but trans->transid
 * will be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid,
 *      number of references in the leaf)
 *
 * When a file extent is removed either during snapshot deletion or
 * file truncation, we find the corresponding back reference and check
 * the following fields:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * When a tree block is created, back references are inserted:
 *
 *     (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a tree block is cow'd, new back references are added for all the
 * blocks it points to.  If the tree block isn't in a reference counted
 * root, the old back references are removed.  These new back references
 * are of the form (trans->transid will have increased since creation):
 *
 *     (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a backref is being deleted, the following fields are checked:
 *
 * if backref was for a tree root:
 *     (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
 * else
 *     (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, the key
 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
 * byte of the parent extent.  If an extent is a tree root, the key offset
 * is set to the key objectid.
 */
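/*
 * Illustrative sketch (not from the original source): composing the key
 * used to find a back reference item, per the rules above.  A parent of 0
 * stands in for the tree-root case here.
 *
 *	key.objectid = bytenr;			  first byte of the extent
 *	key.type = BTRFS_EXTENT_REF_KEY;
 *	key.offset = parent ? parent : bytenr;	  parent, or self for a root
 *
 * lookup_extent_backref() below builds this key before searching the
 * extent tree.
 */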
static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid, int del)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct extent_buffer *leaf;
	u64 ref_objectid;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;

	ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	ref_objectid = btrfs_ref_objectid(leaf, ref);
	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != ref_generation ||
	    (ref_objectid != owner_objectid &&
	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
		ret = -EIO;
		WARN_ON(1);
		goto out;
	}
	ret = 0;
out:
	return ret;
}
/*
 * updates all the backrefs that are pending on update_list for the
 * extent_root
 */
static noinline int update_backrefs(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct list_head *update_list)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct pending_extent_op *op;
	struct extent_buffer *leaf;
	int ret = 0;
	struct list_head *cur = update_list->next;
	u64 ref_objectid;
	u64 ref_root = extent_root->root_key.objectid;

	op = list_entry(cur, struct pending_extent_op, list);

search:
	key.objectid = op->bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = op->orig_parent;

	ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
	BUG_ON(ret);

	leaf = path->nodes[0];

loop:
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);

	ref_objectid = btrfs_ref_objectid(leaf, ref);

	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != op->orig_generation ||
	    (ref_objectid != op->level &&
	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
		printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
		       "root %llu, owner %u\n",
		       (unsigned long long)op->bytenr,
		       (unsigned long long)op->orig_parent,
		       (unsigned long long)ref_root, op->level);
		btrfs_print_leaf(extent_root, leaf);
		BUG();
	}

	key.objectid = op->bytenr;
	key.offset = op->parent;
	key.type = BTRFS_EXTENT_REF_KEY;
	ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
	BUG_ON(ret);
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	btrfs_set_ref_generation(leaf, ref, op->generation);

	cur = cur->next;

	list_del_init(&op->list);
	unlock_extent(&info->extent_ins, op->bytenr,
		      op->bytenr + op->num_bytes - 1, GFP_NOFS);
	kfree(op);

	if (cur == update_list) {
		btrfs_mark_buffer_dirty(path->nodes[0]);
		btrfs_release_path(extent_root, path);
		goto out;
	}

	op = list_entry(cur, struct pending_extent_op, list);

	path->slots[0]++;
	while (path->slots[0] < btrfs_header_nritems(leaf)) {
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid == op->bytenr &&
		    key.type == BTRFS_EXTENT_REF_KEY)
			goto loop;
		path->slots[0]++;
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(extent_root, path);
	goto search;

out:
	return 0;
}
static noinline int insert_extents(struct btrfs_trans_handle *trans,
				   struct btrfs_root *extent_root,
				   struct btrfs_path *path,
				   struct list_head *insert_list, int nr)
{
	struct btrfs_key *keys;
	u32 *data_size;
	struct pending_extent_op *op;
	struct extent_buffer *leaf;
	struct list_head *cur = insert_list->next;
	struct btrfs_fs_info *info = extent_root->fs_info;
	u64 ref_root = extent_root->root_key.objectid;
	int i = 0, last = 0, ret;
	int total = nr * 2;

	if (!nr)
		return 0;

	keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys)
		return -ENOMEM;

	data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
	if (!data_size) {
		kfree(keys);
		return -ENOMEM;
	}

	list_for_each_entry(op, insert_list, list) {
		keys[i].objectid = op->bytenr;
		keys[i].offset = op->num_bytes;
		keys[i].type = BTRFS_EXTENT_ITEM_KEY;
		data_size[i] = sizeof(struct btrfs_extent_item);
		i++;

		keys[i].objectid = op->bytenr;
		keys[i].offset = op->parent;
		keys[i].type = BTRFS_EXTENT_REF_KEY;
		data_size[i] = sizeof(struct btrfs_extent_ref);
		i++;
	}

	op = list_entry(cur, struct pending_extent_op, list);
	i = 0;
	while (i < total) {
		int c;
		ret = btrfs_insert_some_items(trans, extent_root, path,
					      keys+i, data_size+i, total-i);
		BUG_ON(ret < 0);

		if (last && ret > 1)
			BUG();

		leaf = path->nodes[0];
		for (c = 0; c < ret; c++) {
			int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;

			/*
			 * if the first item we inserted was a backref, then
			 * the EXTENT_ITEM will be the odd c's, else it will
			 * be the even c's
			 */
			if ((ref_first && (c % 2)) ||
			    (!ref_first && !(c % 2))) {
				struct btrfs_extent_item *itm;

				itm = btrfs_item_ptr(leaf, path->slots[0] + c,
						     struct btrfs_extent_item);
				btrfs_set_extent_refs(path->nodes[0], itm, 1);
				op->del++;
			} else {
				struct btrfs_extent_ref *ref;

				ref = btrfs_item_ptr(leaf, path->slots[0] + c,
						     struct btrfs_extent_ref);
				btrfs_set_ref_root(leaf, ref, ref_root);
				btrfs_set_ref_generation(leaf, ref,
							 op->generation);
				btrfs_set_ref_objectid(leaf, ref, op->level);
				btrfs_set_ref_num_refs(leaf, ref, 1);
				op->del++;
			}

			/*
			 * using del to see when its ok to free up the
			 * pending_extent_op.  In the case where we insert the
			 * last item on the list in order to help do batching
			 * we need to not free the extent op until we actually
			 * insert the extent_item
			 */
			if (op->del == 2) {
				unlock_extent(&info->extent_ins, op->bytenr,
					      op->bytenr + op->num_bytes - 1,
					      GFP_NOFS);
				cur = cur->next;
				list_del_init(&op->list);
				kfree(op);
				if (cur != insert_list)
					op = list_entry(cur,
						struct pending_extent_op,
						list);
			}
		}
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(extent_root, path);

		/*
		 * Ok backref's and items usually go right next to each other,
		 * but if we could only insert 1 item that means that we
		 * inserted on the end of a leaf, and we have no idea what may
		 * be on the next leaf so we just play it safe.  In order to
		 * try and help this case we insert the last thing on our
		 * insert list so hopefully it will end up being the last
		 * thing on the leaf and everything else will be before it,
		 * which will let us insert a whole bunch of items at the same
		 * time.
		 */
		if (ret == 1 && !last && (i + ret < total)) {
			/*
			 * last: where we will pick up the next time around
			 * i: our current key to insert, will be total - 1
			 * cur: the current op we are screwing with
			 * op: duh
			 */
			last = i + ret;
			i = total - 1;
			cur = insert_list->prev;
			op = list_entry(cur, struct pending_extent_op, list);
		} else if (last) {
			/*
			 * ok we successfully inserted the last item on the
			 * list, lets reset everything
			 *
			 * i: our current key to insert, so where we left off
			 *    last time
			 * last: done with this
			 * cur: the op we are messing with
			 * op: duh
			 * total: since we inserted the last key, we need to
			 *        decrement total so we dont overflow
			 */
			i = last;
			last = 0;
			total--;
			if (i < total) {
				cur = insert_list->next;
				op = list_entry(cur, struct pending_extent_op,
						list);
			}
		} else {
			i += ret;
		}

		cond_resched();
	}
	ret = 0;
	kfree(keys);
	kfree(data_size);
	return ret;
}
static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
	if (ret == 0) {
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		btrfs_set_ref_root(leaf, ref, ref_root);
		btrfs_set_ref_generation(leaf, ref, ref_generation);
		btrfs_set_ref_objectid(leaf, ref, owner_objectid);
		btrfs_set_ref_num_refs(leaf, ref, 1);
	} else if (ret == -EEXIST) {
		u64 existing_owner;
		BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		if (btrfs_ref_root(leaf, ref) != ref_root ||
		    btrfs_ref_generation(leaf, ref) != ref_generation) {
			ret = -EIO;
			WARN_ON(1);
			goto out;
		}

		num_refs = btrfs_ref_num_refs(leaf, ref);
		BUG_ON(num_refs == 0);
		btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);

		existing_owner = btrfs_ref_objectid(leaf, ref);
		if (existing_owner != owner_objectid &&
		    existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
			btrfs_set_ref_objectid(leaf, ref,
					       BTRFS_MULTIPLE_OBJECTIDS);
		}
		ret = 0;
	} else {
		goto out;
	}
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}
static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;
	u32 num_refs;
	int ret = 0;

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	num_refs = btrfs_ref_num_refs(leaf, ref);
	BUG_ON(num_refs == 0);
	num_refs -= 1;
	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		btrfs_set_ref_num_refs(leaf, ref, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_release_path(root, path);
	return ret;
}
#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif
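/*
 * blkdev_issue_discard() works in 512-byte sectors, hence the >> 9 shifts
 * applied to the byte-based start and length above.
 */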
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
#else
	return 0;
#endif
}
static noinline int free_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root,
				 struct list_head *del_list)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	struct list_head *cur;
	struct pending_extent_op *op;
	struct btrfs_extent_item *ei;
	int ret, num_to_del, extent_slot = 0, found_extent = 0;
	u32 refs;
	u64 bytes_freed = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

search:
	/* search for the backref for the current ref we want to delete */
	cur = del_list->next;
	op = list_entry(cur, struct pending_extent_op, list);
	ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
				    op->orig_parent,
				    extent_root->root_key.objectid,
				    op->orig_generation, op->level, 1);
	if (ret) {
		printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
		       "root %llu gen %llu owner %u\n",
		       (unsigned long long)op->bytenr,
		       (unsigned long long)extent_root->root_key.objectid,
		       (unsigned long long)op->orig_generation, op->level);
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		goto out;
	}

	extent_slot = path->slots[0];
	num_to_del = 1;
	found_extent = 0;

	/*
	 * if we aren't the first item on the leaf we can move back one and see
	 * if our ref is right next to our extent item
	 */
	if (likely(extent_slot)) {
		extent_slot--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      extent_slot);
		if (found_key.objectid == op->bytenr &&
		    found_key.type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key.offset == op->num_bytes) {
			num_to_del++;
			found_extent = 1;
		}
	}

	/*
	 * if we didn't find the extent we need to delete the backref and then
	 * search for the extent item key so we can update its ref count
	 */
	if (!found_extent) {
		key.objectid = op->bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = op->num_bytes;

		ret = remove_extent_backref(trans, extent_root, path);
		BUG_ON(ret);
		btrfs_release_path(extent_root, path);
		ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
		BUG_ON(ret);
		extent_slot = path->slots[0];
	}

	/* this is where we update the ref count for the extent */
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs == 0);
	refs--;
	btrfs_set_extent_refs(leaf, ei, refs);

	btrfs_mark_buffer_dirty(leaf);

	/*
	 * This extent needs deleting.  The reason cur_slot is extent_slot +
	 * num_to_del is because extent_slot points to the slot where the
	 * extent is, and if the backref was not right next to the extent we
	 * will be deleting at least 1 item, and will want to start searching
	 * at the slot directly next to extent_slot.  However if we did find
	 * the backref next to the extent item then we will be deleting at
	 * least 2 items and will want to start searching directly after the
	 * ref slot
	 */
	if (!refs) {
		struct list_head *pos, *n, *end;
		int cur_slot = extent_slot+num_to_del;
		u64 super_used;
		u64 root_used;

		path->slots[0] = extent_slot;
		bytes_freed = op->num_bytes;

		mutex_lock(&info->pinned_mutex);
		ret = pin_down_bytes(trans, extent_root, op->bytenr,
				     op->num_bytes, op->level >=
				     BTRFS_FIRST_FREE_OBJECTID);
		mutex_unlock(&info->pinned_mutex);
		BUG_ON(ret < 0);
		op->del = ret;

		/*
		 * we need to see if we can delete multiple things at once, so
		 * start looping through the list of extents we are wanting to
		 * delete and see if their extent/backref's are right next to
		 * each other and the extents only have 1 ref
		 */
		for (pos = cur->next; pos != del_list; pos = pos->next) {
			struct pending_extent_op *tmp;

			tmp = list_entry(pos, struct pending_extent_op, list);

			/* we only want to delete extent+ref at this stage */
			if (cur_slot >= btrfs_header_nritems(leaf) - 1)
				break;

			btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
			if (found_key.objectid != tmp->bytenr ||
			    found_key.type != BTRFS_EXTENT_ITEM_KEY ||
			    found_key.offset != tmp->num_bytes)
				break;

			/* check to make sure this extent only has one ref */
			ei = btrfs_item_ptr(leaf, cur_slot,
					    struct btrfs_extent_item);
			if (btrfs_extent_refs(leaf, ei) != 1)
				break;

			btrfs_item_key_to_cpu(leaf, &found_key, cur_slot+1);
			if (found_key.objectid != tmp->bytenr ||
			    found_key.type != BTRFS_EXTENT_REF_KEY ||
			    found_key.offset != tmp->orig_parent)
				break;

			/*
			 * the ref is right next to the extent, we can set the
			 * ref count to 0 since we will delete them both now
			 */
			btrfs_set_extent_refs(leaf, ei, 0);

			/* pin down the bytes for this extent */
			mutex_lock(&info->pinned_mutex);
			ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
					     tmp->num_bytes, tmp->level >=
					     BTRFS_FIRST_FREE_OBJECTID);
			mutex_unlock(&info->pinned_mutex);
			BUG_ON(ret < 0);

			/*
			 * use the del field to tell if we need to go ahead and
			 * free up the extent when we delete the item or not.
			 */
			tmp->del = ret;
			bytes_freed += tmp->num_bytes;

			num_to_del += 2;
			cur_slot += 2;
		}
		end = pos;

		/* update the free space counters */
		spin_lock(&info->delalloc_lock);
		super_used = btrfs_super_bytes_used(&info->super_copy);
		btrfs_set_super_bytes_used(&info->super_copy,
					   super_used - bytes_freed);

		root_used = btrfs_root_used(&extent_root->root_item);
		btrfs_set_root_used(&extent_root->root_item,
				    root_used - bytes_freed);
		spin_unlock(&info->delalloc_lock);

		/* delete the items */
		ret = btrfs_del_items(trans, extent_root, path,
				      path->slots[0], num_to_del);
		BUG_ON(ret);

		/*
		 * loop through the extents we deleted and do the cleanup work
		 * on them
		 */
		for (pos = cur, n = pos->next; pos != end;
		     pos = n, n = pos->next) {
			struct pending_extent_op *tmp;
			tmp = list_entry(pos, struct pending_extent_op, list);

			/*
			 * remember tmp->del tells us whether or not we pinned
			 * down the extent
			 */
			ret = update_block_group(trans, extent_root,
						 tmp->bytenr, tmp->num_bytes, 0,
						 tmp->del);
			BUG_ON(ret);

			list_del_init(&tmp->list);
			unlock_extent(&info->extent_ins, tmp->bytenr,
				      tmp->bytenr + tmp->num_bytes - 1,
				      GFP_NOFS);
			kfree(tmp);
		}
	} else if (refs && found_extent) {
		/*
		 * the ref and extent were right next to each other, but the
		 * extent still has a ref, so just free the backref and keep
		 * going
		 */
		ret = remove_extent_backref(trans, extent_root, path);
		BUG_ON(ret);

		list_del_init(&op->list);
		unlock_extent(&info->extent_ins, op->bytenr,
			      op->bytenr + op->num_bytes - 1, GFP_NOFS);
		kfree(op);
	} else {
		/*
		 * the extent has multiple refs and the backref we were looking
		 * for was not right next to it, so just unlock and go next,
		 * we're good to go
		 */
		list_del_init(&op->list);
		unlock_extent(&info->extent_ins, op->bytenr,
			      op->bytenr + op->num_bytes - 1, GFP_NOFS);
		kfree(op);
	}

	btrfs_release_path(extent_root, path);
	if (!list_empty(del_list))
		goto search;

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 bytenr,
				     u64 orig_parent, u64 parent,
				     u64 orig_root, u64 ref_root,
				     u64 orig_generation, u64 ref_generation,
				     u64 owner_objectid)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;

	if (root == root->fs_info->extent_root) {
		struct pending_extent_op *extent_op;
		u64 num_bytes;

		BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
		num_bytes = btrfs_level_size(root, (int)owner_objectid);
		mutex_lock(&root->fs_info->extent_ins_mutex);
		if (test_range_bit(&root->fs_info->extent_ins, bytenr,
				bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
			u64 priv;
			ret = get_state_private(&root->fs_info->extent_ins,
						bytenr, &priv);
			BUG_ON(ret);
			extent_op = (struct pending_extent_op *)
							(unsigned long)priv;
			BUG_ON(extent_op->parent != orig_parent);
			BUG_ON(extent_op->generation != orig_generation);

			extent_op->parent = parent;
			extent_op->generation = ref_generation;
		} else {
			extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
			BUG_ON(!extent_op);

			extent_op->type = PENDING_BACKREF_UPDATE;
			extent_op->bytenr = bytenr;
			extent_op->num_bytes = num_bytes;
			extent_op->parent = parent;
			extent_op->orig_parent = orig_parent;
			extent_op->generation = ref_generation;
			extent_op->orig_generation = orig_generation;
			extent_op->level = (int)owner_objectid;
			INIT_LIST_HEAD(&extent_op->list);
			extent_op->del = 0;

			set_extent_bits(&root->fs_info->extent_ins,
					bytenr, bytenr + num_bytes - 1,
					EXTENT_WRITEBACK, GFP_NOFS);
			set_state_private(&root->fs_info->extent_ins,
					  bytenr, (unsigned long)extent_op);
		}
		mutex_unlock(&root->fs_info->extent_ins_mutex);
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = lookup_extent_backref(trans, extent_root, path,
				    bytenr, orig_parent, orig_root,
				    orig_generation, owner_objectid, 1);
	if (ret)
		goto out;
	ret = remove_extent_backref(trans, extent_root, path);
	if (ret)
		goto out;
	ret = insert_extent_backref(trans, extent_root, path, bytenr,
				    parent, ref_root, ref_generation,
				    owner_objectid);
	BUG_ON(ret);
	finish_current_insert(trans, extent_root, 0);
	del_pending_extents(trans, extent_root, 0);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 orig_parent, u64 parent,
			    u64 ref_root, u64 ref_generation,
			    u64 owner_objectid)
{
	int ret;
	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		return 0;
	ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
					parent, ref_root, ref_root,
					ref_generation, ref_generation,
					owner_objectid);
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, u64 bytenr,
				  u64 orig_parent, u64 parent,
				  u64 orig_root, u64 ref_root,
				  u64 orig_generation, u64 ref_generation,
				  u64 owner_objectid)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	u32 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret == 0 || path->slots[0] == 0);

	path->slots[0]--;
	l = path->nodes[0];

	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	if (key.objectid != bytenr) {
		btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
		printk(KERN_ERR "btrfs wanted %llu found %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)key.objectid);
		BUG();
	}
	BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);

	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(l, item);
	btrfs_set_extent_refs(l, item, refs + 1);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent,
				    ref_root, ref_generation,
				    owner_objectid);
	BUG_ON(ret);
	finish_current_insert(trans, root->fs_info->extent_root, 0);
	del_pending_extents(trans, root->fs_info->extent_root, 0);

	btrfs_free_path(path);
	return 0;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 ref_root, u64 ref_generation,
			 u64 owner_objectid)
{
	int ret;
	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		return 0;
	ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
				     0, ref_root, 0, ref_generation,
				     owner_objectid);
	return ret;
}
*trans
,
1324 struct btrfs_root
*root
)
1331 finish_current_insert(trans
, root
->fs_info
->extent_root
, 1);
1332 del_pending_extents(trans
, root
->fs_info
->extent_root
, 1);
1334 /* is there more work to do? */
1335 ret
= find_first_extent_bit(&root
->fs_info
->pending_del
,
1336 0, &start
, &end
, EXTENT_WRITEBACK
);
1339 ret
= find_first_extent_bit(&root
->fs_info
->extent_ins
,
1340 0, &start
, &end
, EXTENT_WRITEBACK
);
1348 int btrfs_lookup_extent_ref(struct btrfs_trans_handle
*trans
,
1349 struct btrfs_root
*root
, u64 bytenr
,
1350 u64 num_bytes
, u32
*refs
)
1352 struct btrfs_path
*path
;
1354 struct btrfs_key key
;
1355 struct extent_buffer
*l
;
1356 struct btrfs_extent_item
*item
;
1358 WARN_ON(num_bytes
< root
->sectorsize
);
1359 path
= btrfs_alloc_path();
1361 key
.objectid
= bytenr
;
1362 key
.offset
= num_bytes
;
1363 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
1364 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
, path
,
1369 btrfs_print_leaf(root
, path
->nodes
[0]);
1370 printk(KERN_INFO
"btrfs failed to find block number %llu\n",
1371 (unsigned long long)bytenr
);
1375 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
1376 *refs
= btrfs_extent_refs(l
, item
);
1378 btrfs_free_path(path
);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 objectid, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref_item;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 ref_root;
	u64 last_snapshot;
	u32 nritems;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != bytenr ||
	    found_key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret == 0)
				continue;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr)
			break;

		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
			path->slots[0]++;
			continue;
		}

		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref);
		ref_root = btrfs_ref_root(leaf, ref_item);
		if ((ref_root != root->root_key.objectid &&
		     ref_root != BTRFS_TREE_LOG_OBJECTID) ||
		     objectid != btrfs_ref_objectid(leaf, ref_item)) {
			ret = 1;
			goto out;
		}
		if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}
/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
	u64 bytenr;
	u32 slot;
};

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
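/*
 * Illustrative usage (mirroring btrfs_inc_ref() below): record one refsort
 * per pointer, then sort by byte number before doing the reference updates.
 *
 *	sorted[refi].bytenr = bytenr;
 *	sorted[refi].slot = i;
 *	refi++;
 *	...
 *	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
 */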
noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *orig_buf,
			   struct extent_buffer *buf, u32 *nr_extents)
{
	u64 bytenr;
	u64 ref_root;
	u64 orig_root;
	u64 ref_generation;
	u64 orig_generation;
	struct refsort *sorted;
	u32 nritems;
	u32 nr_file_extents = 0;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int faili = 0;
	int refi = 0;
	int slot;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	ref_generation = btrfs_header_generation(buf);
	orig_root = btrfs_header_owner(orig_buf);
	orig_generation = btrfs_header_generation(orig_buf);

	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
	BUG_ON(!sorted);

	if (root->ref_cows) {
		process_func = __btrfs_inc_extent_ref;
	} else {
		if (level == 0 &&
		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			goto out;
		if (level != 0 &&
		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
			goto out;
		process_func = __btrfs_update_extent_ref;
	}

	/*
	 * we make two passes through the items.  In the first pass we
	 * only record the byte number and slot.  Then we sort based on
	 * byte number and do the actual work based on the sorted results
	 */
	for (i = 0; i < nritems; i++) {
		cond_resched();
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			nr_file_extents++;
			sorted[refi].bytenr = bytenr;
			sorted[refi].slot = i;
			refi++;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			sorted[refi].bytenr = bytenr;
			sorted[refi].slot = i;
			refi++;
		}
	}

	/*
	 * if refi == 0, we didn't actually put anything into the sorted
	 * array and we're done
	 */
	if (refi == 0)
		goto out;

	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);

	for (i = 0; i < refi; i++) {
		cond_resched();
		slot = sorted[i].slot;
		bytenr = sorted[i].bytenr;

		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, slot);

			ret = process_func(trans, root, bytenr,
					   orig_buf->start, buf->start,
					   orig_root, ref_root,
					   orig_generation, ref_generation,
					   key.objectid);
			if (ret) {
				faili = slot;
				WARN_ON(1);
				goto fail;
			}
		} else {
			ret = process_func(trans, root, bytenr,
					   orig_buf->start, buf->start,
					   orig_root, ref_root,
					   orig_generation, ref_generation,
					   level - 1);
			if (ret) {
				faili = slot;
				WARN_ON(1);
				goto fail;
			}
		}
	}
out:
	kfree(sorted);
	if (nr_extents) {
		if (level == 0)
			*nr_extents = nr_file_extents;
		else
			*nr_extents = nritems;
	}
	return 0;
fail:
	kfree(sorted);
	WARN_ON(1);
	return ret;
}
int btrfs_update_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root, struct extent_buffer *orig_buf,
		     struct extent_buffer *buf, int start_slot, int nr)
{
	u64 bytenr;
	u64 ref_root;
	u64 orig_root;
	u64 ref_generation;
	u64 orig_generation;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int ret;
	int slot;
	int level;

	BUG_ON(start_slot < 0);
	BUG_ON(start_slot + nr > btrfs_header_nritems(buf));

	ref_root = btrfs_header_owner(buf);
	ref_generation = btrfs_header_generation(buf);
	orig_root = btrfs_header_owner(orig_buf);
	orig_generation = btrfs_header_generation(orig_buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows) {
		if (level == 0 &&
		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			return 0;
		if (level != 0 &&
		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
			return 0;
	}

	for (i = 0, slot = start_slot; i < nr; i++, slot++) {
		cond_resched();
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, slot);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, slot,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;
			ret = __btrfs_update_extent_ref(trans, root, bytenr,
					    orig_buf->start, buf->start,
					    orig_root, ref_root,
					    orig_generation, ref_generation,
					    key.objectid);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, slot);
			ret = __btrfs_update_extent_ref(trans, root, bytenr,
					    orig_buf->start, buf->start,
					    orig_root, ref_root,
					    orig_generation, ref_generation,
					    level - 1);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	WARN_ON(1);
	return -1;
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	finish_current_insert(trans, extent_root, 0);
	pending_ret = del_pending_extents(trans, extent_root, 0);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	return 0;
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache, *entry;
	struct rb_node *n;
	int err = 0;
	int werr = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		cache = NULL;
		spin_lock(&root->fs_info->block_group_cache_lock);
		for (n = rb_first(&root->fs_info->block_group_cache_tree);
		     n; n = rb_next(n)) {
			entry = rb_entry(n, struct btrfs_block_group_cache,
					 cache_node);
			if (entry->dirty) {
				cache = entry;
				break;
			}
		}
		spin_unlock(&root->fs_info->block_group_cache_lock);

		if (!cache)
			break;

		cache->dirty = 0;
		last += cache->key.offset;

		err = write_one_cache_group(trans, root,
					    path, cache);
		/*
		 * if we fail to write the cache group, we want
		 * to keep it marked dirty in hopes that a later
		 * write will work
		 */
		if (err) {
			werr = err;
			continue;
		}
	}
	btrfs_free_path(path);
	return werr;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		put_block_group(block_group);
	return readonly;
}
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	list_add(&found->list, &info->space_info);
	INIT_LIST_HEAD(&found->block_groups);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	return 0;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	if (!cache->ro) {
		cache->space_info->bytes_readonly += cache->key.offset -
					btrfs_block_group_used(&cache->item);
		cache->ro = 1;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);
}
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
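/*
 * Example (illustrative): with a single rw device, a profile of
 * (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP) loses RAID1 in the
 * num_devices == 1 check above and reduces to plain DUP.
 */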
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	u64 thresh;
	int ret = 0;

	mutex_lock(&extent_root->fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc) {
		force = 1;
		space_info->force_alloc = 0;
	}
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		goto out;
	}

	thresh = space_info->total_bytes - space_info->bytes_readonly;
	thresh = div_factor(thresh, 6);
	if (!force &&
	    (space_info->bytes_used + space_info->bytes_pinned +
	     space_info->bytes_reserved + alloc_bytes) < thresh) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	if (ret)
		space_info->full = 1;
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
			if (cache->ro)
				cache->space_info->bytes_readonly -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			if (cache->ro)
				cache->space_info->bytes_readonly += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			if (mark_free) {
				int ret;

				ret = btrfs_discard_extent(root, bytenr,
							   num_bytes);
				WARN_ON(ret);

				ret = btrfs_add_free_space(cache, bytenr,
							   num_bytes);
				WARN_ON(ret);
			}
		}
		put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	put_block_group(cache);

	return bytenr;
}
int btrfs_update_pinned_extents(struct btrfs_root *root,
				u64 bytenr, u64 num, int pin)
{
	u64 len;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
	if (pin) {
		set_extent_dirty(&fs_info->pinned_extents,
				 bytenr, bytenr + num - 1, GFP_NOFS);
	} else {
		clear_extent_dirty(&fs_info->pinned_extents,
				   bytenr, bytenr + num - 1, GFP_NOFS);
	}
	while (num > 0) {
		cache = btrfs_lookup_block_group(fs_info, bytenr);
		BUG_ON(!cache);
		len = min(num, cache->key.offset -
			  (bytenr - cache->key.objectid));
		if (pin) {
			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			cache->pinned += len;
			cache->space_info->bytes_pinned += len;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fs_info->total_pinned += len;
		} else {
			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			cache->pinned -= len;
			cache->space_info->bytes_pinned -= len;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fs_info->total_pinned -= len;
			if (cache->cached)
				btrfs_add_free_space(cache, bytenr, len);
		}
		put_block_group(cache);
		bytenr += len;
		num -= len;
	}
	return 0;
}
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve)
{
	u64 len;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	while (num > 0) {
		cache = btrfs_lookup_block_group(fs_info, bytenr);
		BUG_ON(!cache);
		len = min(num, cache->key.offset -
			  (bytenr - cache->key.objectid));

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		if (reserve) {
			cache->reserved += len;
			cache->space_info->bytes_reserved += len;
		} else {
			cache->reserved -= len;
			cache->space_info->bytes_reserved -= len;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
		put_block_group(cache);
		bytenr += len;
		num -= len;
	}
	return 0;
}
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
	u64 last = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
	int ret;

	mutex_lock(&root->fs_info->pinned_mutex);
	while (1) {
		ret = find_first_extent_bit(pinned_extents, last,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		set_extent_dirty(copy, start, end, GFP_NOFS);
		last = end + 1;
	}
	mutex_unlock(&root->fs_info->pinned_mutex);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;
	int ret;

	mutex_lock(&root->fs_info->pinned_mutex);
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		ret = btrfs_discard_extent(root, start, end + 1 - start);

		btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end, GFP_NOFS);

		if (need_resched()) {
			mutex_unlock(&root->fs_info->pinned_mutex);
			cond_resched();
			mutex_lock(&root->fs_info->pinned_mutex);
		}
	}
	mutex_unlock(&root->fs_info->pinned_mutex);
	return ret;
}
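/*
 * Note on finish_current_insert() below: max_inserts is sized so one batch
 * of pending inserts fits in a single leaf.  Each pending op becomes an
 * extent item plus a backref item, i.e. two struct btrfs_key plus two
 * struct btrfs_item headers plus the two item bodies.
 */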
static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root, int all)
{
	u64 start;
	u64 end;
	u64 priv;
	u64 search = 0;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_path *path;
	struct pending_extent_op *extent_op, *tmp;
	struct list_head insert_list, update_list;
	int ret;
	int num_inserts = 0, max_inserts, restart = 0;

	path = btrfs_alloc_path();
	INIT_LIST_HEAD(&insert_list);
	INIT_LIST_HEAD(&update_list);

	max_inserts = extent_root->leafsize /
		(2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
		 sizeof(struct btrfs_extent_ref) +
		 sizeof(struct btrfs_extent_item));
again:
	mutex_lock(&info->extent_ins_mutex);
	while (1) {
		ret = find_first_extent_bit(&info->extent_ins, search, &start,
					    &end, EXTENT_WRITEBACK);
		if (ret) {
			if (restart && !num_inserts &&
			    list_empty(&update_list)) {
				restart = 0;
				search = 0;
				continue;
			}
			break;
		}

		ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
		if (!ret) {
			if (all)
				restart = 1;
			search = end + 1;
			if (need_resched()) {
				mutex_unlock(&info->extent_ins_mutex);
				cond_resched();
				mutex_lock(&info->extent_ins_mutex);
			}
			continue;
		}

		ret = get_state_private(&info->extent_ins, start, &priv);
		BUG_ON(ret);
		extent_op = (struct pending_extent_op *)(unsigned long) priv;

		if (extent_op->type == PENDING_EXTENT_INSERT) {
			num_inserts++;
			list_add_tail(&extent_op->list, &insert_list);
			search = end + 1;
			if (num_inserts == max_inserts) {
				restart = 1;
				break;
			}
		} else if (extent_op->type == PENDING_BACKREF_UPDATE) {
			list_add_tail(&extent_op->list, &update_list);
			search = end + 1;
		} else {
			BUG();
		}
	}

	/*
	 * process the update list, clear the writeback bit for it, and if
	 * somebody marked this thing for deletion then just unlock it and be
	 * done, the free_extents will handle it
	 */
	list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
		clear_extent_bits(&info->extent_ins, extent_op->bytenr,
				  extent_op->bytenr + extent_op->num_bytes - 1,
				  EXTENT_WRITEBACK, GFP_NOFS);
		if (extent_op->del) {
			list_del_init(&extent_op->list);
			unlock_extent(&info->extent_ins, extent_op->bytenr,
				      extent_op->bytenr + extent_op->num_bytes
				      - 1, GFP_NOFS);
			kfree(extent_op);
		}
	}
	mutex_unlock(&info->extent_ins_mutex);

	/*
	 * still have things left on the update list, go ahead and update
	 * everything
	 */
	if (!list_empty(&update_list)) {
		ret = update_backrefs(trans, extent_root, path, &update_list);
		BUG_ON(ret);

		/* we may have COW'ed new blocks, so lets start over */
		if (all)
			restart = 1;
	}

	/*
	 * if no inserts need to be done, but we skipped some extents and we
	 * need to make sure everything is cleaned then reset everything and
	 * go back to the beginning
	 */
	if (!num_inserts && restart) {
		search = 0;
		restart = 0;
		INIT_LIST_HEAD(&update_list);
		INIT_LIST_HEAD(&insert_list);
		goto again;
	} else if (!num_inserts) {
		goto out;
	}

	/*
	 * process the insert extents list.  Again if we are deleting this
	 * extent, then just unlock it, pin down the bytes if need be, and be
	 * done with it.  Saves us from having to actually insert the extent
	 * into the tree and then subsequently come along and delete it
	 */
	mutex_lock(&info->extent_ins_mutex);
	list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
		clear_extent_bits(&info->extent_ins, extent_op->bytenr,
				  extent_op->bytenr + extent_op->num_bytes - 1,
				  EXTENT_WRITEBACK, GFP_NOFS);
		if (extent_op->del) {
			u64 used;
			list_del_init(&extent_op->list);
			unlock_extent(&info->extent_ins, extent_op->bytenr,
				      extent_op->bytenr + extent_op->num_bytes
				      - 1, GFP_NOFS);

			mutex_lock(&extent_root->fs_info->pinned_mutex);
			ret = pin_down_bytes(trans, extent_root,
					     extent_op->bytenr,
					     extent_op->num_bytes, 0);
			mutex_unlock(&extent_root->fs_info->pinned_mutex);

			spin_lock(&info->delalloc_lock);
			used = btrfs_super_bytes_used(&info->super_copy);
			btrfs_set_super_bytes_used(&info->super_copy,
					used - extent_op->num_bytes);
			used = btrfs_root_used(&extent_root->root_item);
			btrfs_set_root_used(&extent_root->root_item,
					used - extent_op->num_bytes);
			spin_unlock(&info->delalloc_lock);

			ret = update_block_group(trans, extent_root,
						 extent_op->bytenr,
						 extent_op->num_bytes,
						 0, ret > 0);
			BUG_ON(ret);
			kfree(extent_op);
			num_inserts--;
		}
	}
	mutex_unlock(&info->extent_ins_mutex);

	ret = insert_extents(trans, extent_root, path, &insert_list,
			     num_inserts);
	BUG_ON(ret);

	/*
	 * if restart is set for whatever reason we need to go back and start
	 * searching through the pending list again.
	 *
	 * We just inserted some extents, which could have resulted in new
	 * blocks being allocated, which would result in new blocks needing
	 * updates, so if all is set we _must_ restart to get the updated
	 * new blocks
	 */
	if (restart || all) {
		INIT_LIST_HEAD(&insert_list);
		INIT_LIST_HEAD(&update_list);
		search = 0;
		restart = 0;
		num_inserts = 0;
		goto again;
	}
out:
	btrfs_free_path(path);
	return 0;
}
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, u64 num_bytes, int is_data)
{
	int err = 0;
	struct extent_buffer *buf;

	if (is_data)
		goto pinit;

	buf = btrfs_find_tree_block(root, bytenr, num_bytes);
	if (!buf)
		goto pinit;

	/* we can reuse a block if it hasn't been written
	 * and it is from this transaction.  We can't
	 * reuse anything from the tree log root because
	 * it has tiny sub-transactions.
	 */
	if (btrfs_buffer_uptodate(buf, 0) &&
	    btrfs_try_tree_lock(buf)) {
		u64 header_owner = btrfs_header_owner(buf);
		u64 header_transid = btrfs_header_generation(buf);
		if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
		    header_owner != BTRFS_TREE_RELOC_OBJECTID &&
		    header_transid == trans->transid &&
		    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			clean_tree_block(NULL, root, buf);
			btrfs_tree_unlock(buf);
			free_extent_buffer(buf);
			return 1;
		}
		btrfs_tree_unlock(buf);
	}
	free_extent_buffer(buf);
pinit:
	btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);

	BUG_ON(err < 0);
	return 0;
}
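/*
 * Note on the return value above (added commentary, not from the original
 * source): as the callers use it, pin_down_bytes() appears to return 1 when
 * the block can be reclaimed immediately (a clean, unwritten block from the
 * running transaction) and 0 when the range had to stay pinned until the
 * transaction commits; callers such as __free_extent() treat ret > 0 as
 * "safe to mark the space free right away".
 */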
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner_objectid, int pin, int mark_free)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	int ret;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	struct btrfs_extent_item *ei;
	u32 refs;

	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = lookup_extent_backref(trans, extent_root, path,
				    bytenr, parent, root_objectid,
				    ref_generation, owner_objectid, 1);
	if (ret == 0) {
		struct btrfs_key found_key;
		extent_slot = path->slots[0];
		while (extent_slot > 0) {
			extent_slot--;
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      extent_slot);
			if (found_key.objectid != bytenr)
				break;
			if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
			    found_key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
		}
		if (!found_extent) {
			ret = remove_extent_backref(trans, extent_root, path);
			BUG_ON(ret);
			btrfs_release_path(extent_root, path);
			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			}
			BUG_ON(ret);
			extent_slot = path->slots[0];
		}
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "root %llu gen %llu owner %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)root_objectid,
		       (unsigned long long)ref_generation,
		       (unsigned long long)owner_objectid);
	}

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs == 0);
	refs -= 1;
	btrfs_set_extent_refs(leaf, ei, refs);

	btrfs_mark_buffer_dirty(leaf);

	if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
		struct btrfs_extent_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
		/* if the back ref and the extent are next to each other
		 * they get deleted below in one shot
		 */
		path->slots[0] = extent_slot;
		num_to_del = 2;
	} else if (found_extent) {
		/* otherwise delete the extent back ref */
		ret = remove_extent_backref(trans, extent_root, path);
		BUG_ON(ret);
		/* if refs are 0, we need to setup the path for deletion */
		if (refs == 0) {
			btrfs_release_path(extent_root, path);
			ret = btrfs_search_slot(trans, extent_root, &key, path,
						-1, 1);
			BUG_ON(ret);
		}
	}

	if (refs == 0) {
		u64 super_used;
		u64 root_used;

		if (pin) {
			mutex_lock(&root->fs_info->pinned_mutex);
			ret = pin_down_bytes(trans, root, bytenr, num_bytes,
				owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
			mutex_unlock(&root->fs_info->pinned_mutex);
			if (ret > 0)
				mark_free = 1;
			BUG_ON(ret < 0);
		}

		/* block accounting for super block */
		spin_lock(&info->delalloc_lock);
		super_used = btrfs_super_bytes_used(&info->super_copy);
		btrfs_set_super_bytes_used(&info->super_copy,
					   super_used - num_bytes);

		/* block accounting for root item */
		root_used = btrfs_root_used(&root->root_item);
		btrfs_set_root_used(&root->root_item,
					   root_used - num_bytes);
		spin_unlock(&info->delalloc_lock);
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		btrfs_release_path(extent_root, path);

		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			BUG_ON(ret);
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0,
					 mark_free);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root, 0);
	return ret;
}
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, int all)
{
	int ret;
	int err = 0;
	u64 start;
	u64 end;
	u64 priv;
	u64 search = 0;
	int nr = 0, skipped = 0;
	struct extent_io_tree *pending_del;
	struct extent_io_tree *extent_ins;
	struct pending_extent_op *extent_op;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct list_head delete_list;

	INIT_LIST_HEAD(&delete_list);
	extent_ins = &extent_root->fs_info->extent_ins;
	pending_del = &extent_root->fs_info->pending_del;

again:
	mutex_lock(&info->extent_ins_mutex);
	while (1) {
		ret = find_first_extent_bit(pending_del, search, &start, &end,
					    EXTENT_WRITEBACK);
		if (ret) {
			if (all && skipped && !nr) {
				search = 0;
				skipped = 0;
				continue;
			}
			mutex_unlock(&info->extent_ins_mutex);
			break;
		}

		ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
		if (!ret) {
			search = end + 1;
			skipped = 1;

			if (need_resched()) {
				mutex_unlock(&info->extent_ins_mutex);
				cond_resched();
				mutex_lock(&info->extent_ins_mutex);
			}

			continue;
		}

		ret = get_state_private(pending_del, start, &priv);
		BUG_ON(ret);
		extent_op = (struct pending_extent_op *)(unsigned long)priv;

		clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
				  GFP_NOFS);
		if (!test_range_bit(extent_ins, start, end,
				    EXTENT_WRITEBACK, 0)) {
			list_add_tail(&extent_op->list, &delete_list);
			nr++;
		} else {
			kfree(extent_op);

			ret = get_state_private(&info->extent_ins, start,
						&priv);
			BUG_ON(ret);
			extent_op = (struct pending_extent_op *)
						(unsigned long)priv;

			clear_extent_bits(&info->extent_ins, start, end,
					  EXTENT_WRITEBACK, GFP_NOFS);

			if (extent_op->type == PENDING_BACKREF_UPDATE) {
				list_add_tail(&extent_op->list, &delete_list);
				search = end + 1;
				nr++;
				continue;
			}

			mutex_lock(&extent_root->fs_info->pinned_mutex);
			ret = pin_down_bytes(trans, extent_root, start,
					     end + 1 - start, 0);
			mutex_unlock(&extent_root->fs_info->pinned_mutex);

			ret = update_block_group(trans, extent_root, start,
						end + 1 - start, 0, ret > 0);

			unlock_extent(extent_ins, start, end, GFP_NOFS);
			BUG_ON(ret);
			kfree(extent_op);
		}
		if (ret)
			err = ret;

		search = end + 1;

		if (need_resched()) {
			mutex_unlock(&info->extent_ins_mutex);
			cond_resched();
			mutex_lock(&info->extent_ins_mutex);
		}
	}

	if (nr) {
		ret = free_extents(trans, extent_root, &delete_list);
		BUG_ON(ret);
	}

	if (all && skipped) {
		INIT_LIST_HEAD(&delete_list);
		search = 0;
		nr = 0;
		goto again;
	}

	if (!err)
		finish_current_insert(trans, extent_root, 0);
	return err;
}
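/*
 * Added commentary (not from the original source): del_pending_extents()
 * and finish_current_insert() feed each other -- freeing extents can COW
 * extent tree blocks, which queues new pending inserts, so this function
 * finishes by calling finish_current_insert() with all == 0.  The 'all'
 * flag only forces the skipped-range rescan ("if (all && skipped ...)")
 * for the outermost caller.
 */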
/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 ref_generation,
			       u64 owner_objectid, int pin)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	int pending_ret;
	int ret;

	WARN_ON(num_bytes < root->sectorsize);
	if (root == extent_root) {
		struct pending_extent_op *extent_op = NULL;

		mutex_lock(&root->fs_info->extent_ins_mutex);
		if (test_range_bit(&root->fs_info->extent_ins, bytenr,
				bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
			u64 priv;
			ret = get_state_private(&root->fs_info->extent_ins,
						bytenr, &priv);
			BUG_ON(ret);
			extent_op = (struct pending_extent_op *)
						(unsigned long)priv;

			extent_op->del = 1;
			if (extent_op->type == PENDING_EXTENT_INSERT) {
				mutex_unlock(&root->fs_info->extent_ins_mutex);
				return 0;
			}
		}

		if (extent_op) {
			ref_generation = extent_op->orig_generation;
			parent = extent_op->orig_parent;
		}

		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);

		extent_op->type = PENDING_EXTENT_DELETE;
		extent_op->bytenr = bytenr;
		extent_op->num_bytes = num_bytes;
		extent_op->parent = parent;
		extent_op->orig_parent = parent;
		extent_op->generation = ref_generation;
		extent_op->orig_generation = ref_generation;
		extent_op->level = (int)owner_objectid;
		INIT_LIST_HEAD(&extent_op->list);
		extent_op->del = 0;

		set_extent_bits(&root->fs_info->pending_del,
				bytenr, bytenr + num_bytes - 1,
				EXTENT_WRITEBACK, GFP_NOFS);
		set_state_private(&root->fs_info->pending_del,
				  bytenr, (unsigned long)extent_op);
		mutex_unlock(&root->fs_info->extent_ins_mutex);
		return 0;
	}
	/* if metadata always pin */
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
			mutex_lock(&root->fs_info->pinned_mutex);
			btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
			mutex_unlock(&root->fs_info->pinned_mutex);
			update_reserved_extents(root, bytenr, num_bytes, 0);
			return 0;
		}
		pin = 1;
	}

	/* if data pin when any transaction has committed this */
	if (ref_generation != trans->transid)
		pin = 1;

	ret = __free_extent(trans, root, bytenr, num_bytes, parent,
			    root_objectid, ref_generation,
			    owner_objectid, pin, pin == 0);

	finish_current_insert(trans, root->fs_info->extent_root, 0);
	pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
	return ret ? ret : pending_ret;
}
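/*
 * Added commentary: when root == extent_root the free cannot recurse into
 * the very tree it is modifying, so the delete is queued as a
 * pending_extent_op in fs_info->pending_del and replayed later by
 * del_pending_extents().  An extent that was queued for insert but never
 * written is simply marked ->del, letting the insert and the delete cancel
 * each other without ever touching the extent tree.
 */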
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 ref_generation,
		      u64 owner_objectid, int pin)
{
	int ret;

	ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
				  root_objectid, ref_generation,
				  owner_objectid, pin);
	return ret;
}
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
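/*
 * Worked example (illustrative, not from the original source): with
 * root->stripesize == 64K, stripe_align(root, 0x21000) computes
 * mask = 0xffff and returns (0x21000 + 0xffff) & ~0xffff == 0x30000,
 * i.e. val rounded up to the next stripe boundary.  Already-aligned
 * values are returned unchanged.
 */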
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 exclude_start, u64 exclude_nr,
				     int data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	u64 total_needed = num_bytes;
	u64 *last_ptr = NULL;
	u64 last_wanted = 0;
	struct btrfs_block_group_cache *block_group = NULL;
	int chunk_alloc_done = 0;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	struct list_head *head = NULL, *cur = NULL;
	int loop = 0;
	int extra_loop = 0;
	struct btrfs_space_info *space_info;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA) {
		last_ptr = &root->fs_info->last_alloc;
		empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
		last_ptr = &root->fs_info->last_data_alloc;

	if (last_ptr) {
		if (*last_ptr) {
			hint_byte = *last_ptr;
			last_wanted = *last_ptr;
		} else
			empty_size += empty_cluster;
	} else {
		empty_cluster = 0;
	}
	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (last_wanted && search_start != last_wanted) {
		last_wanted = 0;
		empty_size += empty_cluster;
	}

	total_needed += empty_size;
	block_group = btrfs_lookup_block_group(root->fs_info, search_start);
	if (!block_group)
		block_group = btrfs_lookup_first_block_group(root->fs_info,
							     search_start);
	space_info = __find_space_info(root->fs_info, data);

	down_read(&space_info->groups_sem);
	while (1) {
		struct btrfs_free_space *free_space;
		/*
		 * the only way this happens if our hint points to a block
		 * group thats not of the proper type, while looping this
		 * should never happen
		 */
		if (empty_size)
			extra_loop = 1;

		if (!block_group)
			goto new_group_no_lock;

		if (unlikely(!block_group->cached)) {
			mutex_lock(&block_group->cache_mutex);
			ret = cache_block_group(root, block_group);
			mutex_unlock(&block_group->cache_mutex);
			if (ret)
				break;
		}

		mutex_lock(&block_group->alloc_mutex);
		if (unlikely(!block_group_bits(block_group, data)))
			goto new_group;

		if (unlikely(block_group->ro))
			goto new_group;

		free_space = btrfs_find_free_space(block_group, search_start,
						   total_needed);
		if (free_space) {
			u64 start = block_group->key.objectid;
			u64 end = block_group->key.objectid +
				block_group->key.offset;

			search_start = stripe_align(root, free_space->offset);

			/* move on to the next group */
			if (search_start + num_bytes >= search_end)
				goto new_group;

			/* move on to the next group */
			if (search_start + num_bytes > end)
				goto new_group;

			if (last_wanted && search_start != last_wanted) {
				total_needed += empty_cluster;
				empty_size += empty_cluster;
				last_wanted = 0;
				/*
				 * if search_start is still in this block group
				 * then we just re-search this block group
				 */
				if (search_start >= start &&
				    search_start < end) {
					mutex_unlock(&block_group->alloc_mutex);
					continue;
				}

				/* else we go to the next block group */
				goto new_group;
			}

			if (exclude_nr > 0 &&
			    (search_start + num_bytes > exclude_start &&
			     search_start < exclude_start + exclude_nr)) {
				search_start = exclude_start + exclude_nr;
				/*
				 * if search_start is still in this block group
				 * then we just re-search this block group
				 */
				if (search_start >= start &&
				    search_start < end) {
					mutex_unlock(&block_group->alloc_mutex);
					last_wanted = 0;
					continue;
				}

				/* else we go to the next block group */
				goto new_group;
			}

			ins->objectid = search_start;
			ins->offset = num_bytes;

			btrfs_remove_free_space_lock(block_group, search_start,
						     num_bytes);
			/* we are all good, lets return */
			mutex_unlock(&block_group->alloc_mutex);
			break;
		}
new_group:
		mutex_unlock(&block_group->alloc_mutex);
		put_block_group(block_group);
		block_group = NULL;
new_group_no_lock:
		/* don't try to compare new allocations against the
		 * last allocation any more
		 */
		last_wanted = 0;

		/*
		 * Here's how this works.
		 * loop == 0: we were searching a block group via a hint
		 *		and didn't find anything, so we start at
		 *		the head of the block groups and keep searching
		 * loop == 1: we're searching through all of the block groups
		 *		if we hit the head again we have searched
		 *		all of the block groups for this space and we
		 *		need to try and allocate, if we cant error out.
		 * loop == 2: we allocated more space and are looping through
		 *		all of the block groups again.
		 */
		if (loop == 0) {
			head = &space_info->block_groups;
			cur = head->next;
			loop++;
		} else if (loop == 1 && cur == head) {
			int keep_going;

			/* at this point we give up on the empty_size
			 * allocations and just try to allocate the min
			 * space.
			 *
			 * The extra_loop field was set if an empty_size
			 * allocation was attempted above, and if this
			 * is try we need to try the loop again without
			 * the additional empty_size.
			 */
			total_needed -= empty_size;
			empty_size = 0;
			keep_going = extra_loop;
			loop++;

			if (allowed_chunk_alloc && !chunk_alloc_done) {
				up_read(&space_info->groups_sem);
				ret = do_chunk_alloc(trans, root, num_bytes +
						     2 * 1024 * 1024, data, 1);
				down_read(&space_info->groups_sem);
				if (ret < 0)
					goto loop_check;
				head = &space_info->block_groups;
				/*
				 * we've allocated a new chunk, keep
				 * trying
				 */
				keep_going = 1;
				chunk_alloc_done = 1;
			} else if (!allowed_chunk_alloc) {
				space_info->force_alloc = 1;
			}
loop_check:
			if (keep_going) {
				cur = head->next;
				extra_loop = 0;
			} else {
				break;
			}
		} else if (cur == head) {
			break;
		}

		block_group = list_entry(cur, struct btrfs_block_group_cache,
					 list);
		atomic_inc(&block_group->count);

		search_start = block_group->key.objectid;
		cur = cur->next;
	}

	/* we found what we needed */
	if (ins->objectid) {
		if (!(data & BTRFS_BLOCK_GROUP_DATA))
			trans->block_group = block_group->key.objectid;

		if (last_ptr)
			*last_ptr = ins->objectid + ins->offset;
		ret = 0;
	} else if (!ret) {
		printk(KERN_ERR "btrfs searching for %llu bytes, "
		       "num_bytes %llu, loop %d, allowed_alloc %d\n",
		       (unsigned long long)total_needed,
		       (unsigned long long)num_bytes,
		       loop, allowed_chunk_alloc);
		ret = -ENOSPC;
	}
	if (block_group)
		put_block_group(block_group);

	up_read(&space_info->groups_sem);
	return ret;
}
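/*
 * Added commentary on the clustering above (not from the original source):
 * last_ptr remembers where the previous allocation ended so back-to-back
 * allocations stay contiguous, and empty_cluster pads total_needed -- a 4K
 * metadata allocation, for instance, searches for roughly 4K + 64K so a
 * fresh cluster has room to grow.  The padding is dropped on loop == 1
 * before the allocator gives up with -ENOSPC.
 */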
static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
{
	struct btrfs_block_group_cache *cache;

	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved),
	       (info->full) ? "" : "not ");

	down_read(&info->groups_sem);
	list_for_each_entry(cache, &info->block_groups, list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	up_read(&info->groups_sem);
}
static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 num_bytes, u64 min_alloc_size,
				  u64 empty_size, u64 hint_byte,
				  u64 search_end, struct btrfs_key *ins,
				  u64 data)
{
	int ret;
	u64 search_start = 0;
	u64 alloc_profile;
	struct btrfs_fs_info *info = root->fs_info;

	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
			info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
			info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
			info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}
again:
	data = btrfs_reduce_alloc_profile(root, data);
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows) {
		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     2 * 1024 * 1024,
				     BTRFS_BLOCK_GROUP_METADATA |
				     (info->metadata_alloc_profile &
				      info->avail_metadata_alloc_bits), 0);
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);
	}

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes);
		BUG();
	}

	return ret;
}
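/*
 * Worked example of the -ENOSPC retry above (illustrative): a request for
 * 8M with min_alloc_size 1M retries with 4M, then 2M, then 1M -- each
 * halved value rounded down to a sectorsize multiple and clamped to
 * min_alloc_size, with a chunk allocation forced before each retry.
 */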
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	ret = btrfs_discard_extent(root, start, len);

	btrfs_add_free_space(cache, start, len);
	put_block_group(cache);
	update_reserved_extents(root, start, len, 0);

	return ret;
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
				     empty_size, hint_byte, search_end, ins,
				     data);
	update_reserved_extents(root, ins->objectid, ins->offset, 1);
	return ret;
}
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root, u64 parent,
					 u64 root_objectid, u64 ref_generation,
					 u64 owner, struct btrfs_key *ins)
{
	int ret;
	int pending_ret;
	u64 super_used;
	u64 root_used;
	u64 num_bytes = ins->offset;
	u32 sizes[2];
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_ref *ref;
	struct btrfs_path *path;
	struct btrfs_key keys[2];

	if (parent == 0)
		parent = ins->objectid;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	super_used = btrfs_super_bytes_used(&info->super_copy);
	btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);

	/* block accounting for root item */
	root_used = btrfs_root_used(&root->root_item);
	btrfs_set_root_used(&root->root_item, root_used + num_bytes);
	spin_unlock(&info->delalloc_lock);

	if (root == extent_root) {
		struct pending_extent_op *extent_op;

		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);

		extent_op->type = PENDING_EXTENT_INSERT;
		extent_op->bytenr = ins->objectid;
		extent_op->num_bytes = ins->offset;
		extent_op->parent = parent;
		extent_op->orig_parent = 0;
		extent_op->generation = ref_generation;
		extent_op->orig_generation = 0;
		extent_op->level = (int)owner;
		INIT_LIST_HEAD(&extent_op->list);
		extent_op->del = 0;

		mutex_lock(&root->fs_info->extent_ins_mutex);
		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
				ins->objectid + ins->offset - 1,
				EXTENT_WRITEBACK, GFP_NOFS);
		set_state_private(&root->fs_info->extent_ins,
				  ins->objectid, (unsigned long)extent_op);
		mutex_unlock(&root->fs_info->extent_ins_mutex);
		goto update_block;
	}

	memcpy(&keys[0], ins, sizeof(*ins));
	keys[1].objectid = ins->objectid;
	keys[1].type = BTRFS_EXTENT_REF_KEY;
	keys[1].offset = parent;
	sizes[0] = sizeof(*extent_item);
	sizes[1] = sizeof(*ref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
				       sizes, 2);
	BUG_ON(ret);

	extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_extent_ref);

	btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
	btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
	btrfs_set_ref_objectid(path->nodes[0], ref, owner);
	btrfs_set_ref_num_refs(path->nodes[0], ref, 1);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	trans->alloc_exclude_start = 0;
	trans->alloc_exclude_nr = 0;
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root, 0);
	pending_ret = del_pending_extents(trans, extent_root, 0);

	if (ret)
		goto out;
	if (pending_ret) {
		ret = pending_ret;
		goto out;
	}

update_block:
	ret = update_block_group(trans, root, ins->objectid,
				 ins->offset, 1, 0);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
out:
	return ret;
}
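/*
 * Added commentary: keys[0]/keys[1] above insert the extent item and its
 * first backref as two adjacent items in a single
 * btrfs_insert_empty_items() call, which is why __free_extent() can remove
 * both with one btrfs_del_items() when they still sit next to each other.
 */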
int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 parent,
				u64 root_objectid, u64 ref_generation,
				u64 owner, struct btrfs_key *ins)
{
	int ret;

	if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
		return 0;
	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
					    ref_generation, owner, ins);
	update_reserved_extents(root, ins->objectid, ins->offset, 0);
	return ret;
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 parent,
			      u64 root_objectid, u64 ref_generation,
			      u64 owner, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	mutex_lock(&block_group->cache_mutex);
	cache_block_group(root, block_group);
	mutex_unlock(&block_group->cache_mutex);

	ret = btrfs_remove_free_space(block_group, ins->objectid,
				      ins->offset);
	BUG_ON(ret);
	put_block_group(block_group);
	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
					    ref_generation, owner, ins);
	return ret;
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       u64 num_bytes, u64 parent, u64 min_alloc_size,
		       u64 root_objectid, u64 ref_generation,
		       u64 owner_objectid, u64 empty_size, u64 hint_byte,
		       u64 search_end, struct btrfs_key *ins, u64 data)
{
	int ret;

	ret = __btrfs_reserve_extent(trans, root, num_bytes,
				     min_alloc_size, empty_size, hint_byte,
				     search_end, ins, data);
	BUG_ON(ret);
	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = __btrfs_alloc_reserved_extent(trans, root, parent,
					root_objectid, ref_generation,
					owner_objectid, ins);
		BUG_ON(ret);
	} else {
		update_reserved_extents(root, ins->objectid, ins->offset, 1);
	}
	return ret;
}
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		set_extent_dirty(&root->dirty_log_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
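/*
 * Added commentary: log tree blocks are tracked in root->dirty_log_pages
 * rather than the transaction's dirty_pages because the tree log is
 * written out by the fsync path, outside the normal transaction commit.
 */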
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u32 blocksize, u64 parent,
					     u64 root_objectid,
					     u64 ref_generation,
					     int level, u64 hint,
					     u64 empty_size)
{
	struct btrfs_key ins;
	int ret;
	struct extent_buffer *buf;

	ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
				 root_objectid, ref_generation, level,
				 empty_size, hint, (u64)-1, &ins, 0);
	if (ret) {
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
	return buf;
}
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, struct extent_buffer *leaf)
{
	u64 leaf_owner;
	u64 leaf_generation;
	struct refsort *sorted;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int nritems;
	int ret;
	int refi = 0;
	int slot;

	BUG_ON(!btrfs_is_leaf(leaf));
	nritems = btrfs_header_nritems(leaf);
	leaf_owner = btrfs_header_owner(leaf);
	leaf_generation = btrfs_header_generation(leaf);

	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
	/* we do this loop twice.  The first time we build a list
	 * of the extents we have a reference on, then we sort the list
	 * by bytenr.  The second time around we actually do the
	 * extent freeing.
	 */
	for (i = 0; i < nritems; i++) {
		u64 disk_bytenr;
		cond_resched();

		btrfs_item_key_to_cpu(leaf, &key, i);

		/* only extents have references, skip everything else */
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);

		/* inline extents live in the btree, they don't have refs */
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;

		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);

		/* holes don't have refs */
		if (disk_bytenr == 0)
			continue;

		sorted[refi].bytenr = disk_bytenr;
		sorted[refi].slot = i;
		refi++;
	}

	if (refi == 0)
		goto out;

	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);

	for (i = 0; i < refi; i++) {
		u64 disk_bytenr;

		disk_bytenr = sorted[i].bytenr;
		slot = sorted[i].slot;

		cond_resched();

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

		ret = __btrfs_free_extent(trans, root, disk_bytenr,
				btrfs_file_extent_disk_num_bytes(leaf, fi),
				leaf->start, leaf_owner, leaf_generation,
				key.objectid, 0);
		BUG_ON(ret);

		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
		cond_resched();
	}
out:
	kfree(sorted);
	return 0;
}
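/*
 * Added commentary: the refsort pass above trades a kmalloc and a sort for
 * much better locality in the extent allocation tree -- dropping refs in
 * bytenr order means the searches and COWs walk the extent tree roughly
 * left to right instead of scribbling into it at random (the same idea is
 * used again in drop_level_one_refs below).
 */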
static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_leaf_ref *ref)
{
	int i;
	int ret;
	struct btrfs_extent_info *info;
	struct refsort *sorted;

	if (ref->nritems == 0)
		return 0;

	sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
	for (i = 0; i < ref->nritems; i++) {
		sorted[i].bytenr = ref->extents[i].bytenr;
		sorted[i].slot = i;
	}
	sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);

	/*
	 * the items in the ref were sorted when the ref was inserted
	 * into the ref cache, so this is already in order
	 */
	for (i = 0; i < ref->nritems; i++) {
		info = ref->extents + sorted[i].slot;
		ret = __btrfs_free_extent(trans, root, info->bytenr,
					  info->num_bytes, ref->bytenr,
					  ref->owner, ref->generation,
					  info->objectid, 0);

		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
		cond_resched();

		BUG_ON(ret);
	}

	kfree(sorted);
	return 0;
}
static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
				     u64 len, u32 *refs)
{
	int ret;

	ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
	BUG_ON(ret);

#if 0 /* some debugging code in case we see problems here */
	/* if the refs count is one, it won't get increased again.  But
	 * if the ref count is > 1, someone may be decreasing it at
	 * the same time we are.
	 */
	if (*refs != 1) {
		struct extent_buffer *eb = NULL;
		eb = btrfs_find_create_tree_block(root, start, len);
		if (eb)
			btrfs_tree_lock(eb);

		mutex_lock(&root->fs_info->alloc_mutex);
		ret = lookup_extent_ref(NULL, root, start, len, refs);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->alloc_mutex);

		if (eb) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
		}
		if (*refs == 1) {
			printk(KERN_ERR "btrfs block %llu went down to one "
			       "during drop_snap\n", (unsigned long long)start);
		}
	}
#endif

	cond_resched();
	return ret;
}
/*
 * this is used while deleting old snapshots, and it drops the refs
 * on a whole subtree starting from a level 1 node.
 *
 * The idea is to sort all the leaf pointers, and then drop the
 * ref on all the leaves in order.  Most of the time the leaves
 * will have ref cache entries, so no leaf IOs will be required to
 * find the extents they have references on.
 *
 * For each leaf, any references it has are also dropped in order
 *
 * This ends up dropping the references in something close to optimal
 * order for reading and modifying the extent allocation tree.
 */
static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path)
{
	u64 bytenr;
	u64 root_owner;
	u64 root_gen;
	struct extent_buffer *eb = path->nodes[1];
	struct extent_buffer *leaf;
	struct btrfs_leaf_ref *ref;
	struct refsort *sorted = NULL;
	int nritems = btrfs_header_nritems(eb);
	int ret;
	int i;
	int refi = 0;
	int slot = path->slots[1];
	u32 blocksize = btrfs_level_size(root, 0);
	u32 refs;

	if (nritems == 0)
		goto out;

	root_owner = btrfs_header_owner(eb);
	root_gen = btrfs_header_generation(eb);
	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);

	/*
	 * step one, sort all the leaf pointers so we don't scribble
	 * randomly into the extent allocation tree
	 */
	for (i = slot; i < nritems; i++) {
		sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
		sorted[refi].slot = i;
		refi++;
	}

	/*
	 * nritems won't be zero, but if we're picking up drop_snapshot
	 * after a crash, slot might be > 0, so double check things
	 * just in case.
	 */
	if (refi == 0)
		goto out;

	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);

	/*
	 * the first loop frees everything the leaves point to
	 */
	for (i = 0; i < refi; i++) {
		u64 ptr_gen;

		bytenr = sorted[i].bytenr;

		/*
		 * check the reference count on this leaf.  If it is > 1
		 * we just decrement it below and don't update any
		 * of the refs the leaf points to.
		 */
		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
		BUG_ON(ret);
		if (refs != 1)
			continue;

		ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);

		/*
		 * the leaf only had one reference, which means the
		 * only thing pointing to this leaf is the snapshot
		 * we're deleting.  It isn't possible for the reference
		 * count to increase again later
		 *
		 * The reference cache is checked for the leaf,
		 * and if found we'll be able to drop any refs held by
		 * the leaf without needing to read it in.
		 */
		ref = btrfs_lookup_leaf_ref(root, bytenr);
		if (ref && ref->generation != ptr_gen) {
			btrfs_free_leaf_ref(root, ref);
			ref = NULL;
		}
		if (ref) {
			ret = cache_drop_leaf_ref(trans, root, ref);
			BUG_ON(ret);
			btrfs_remove_leaf_ref(root, ref);
			btrfs_free_leaf_ref(root, ref);
		} else {
			/*
			 * the leaf wasn't in the reference cache, so
			 * we have to read it.
			 */
			leaf = read_tree_block(root, bytenr, blocksize,
					       ptr_gen);
			ret = btrfs_drop_leaf_ref(trans, root, leaf);
			BUG_ON(ret);
			free_extent_buffer(leaf);
		}
		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
		cond_resched();
	}

	/*
	 * run through the loop again to free the refs on the leaves.
	 * This is faster than doing it in the loop above because
	 * the leaves are likely to be clustered together.  We end up
	 * working in nice chunks on the extent allocation tree.
	 */
	for (i = 0; i < refi; i++) {
		bytenr = sorted[i].bytenr;
		ret = __btrfs_free_extent(trans, root, bytenr,
					  blocksize, eb->start,
					  root_owner, root_gen, 0, 1);
		BUG_ON(ret);

		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
		cond_resched();
	}
out:
	kfree(sorted);

	/*
	 * update the path to show we've processed the entire level 1
	 * node.  This will get saved into the root's drop_snapshot_progress
	 * field so these drops are not repeated again if this transaction
	 * commits.
	 */
	path->slots[1] = nritems;
	return 0;
}
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret;
	u32 refs;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
					path->nodes[*level]->len, &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;

	/*
	 * walk down to the last node level and free all the leaves
	 */
	while (*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		/* the new code goes down to level 1 and does all the
		 * leaves pointed to that node in bulk.  So, this check
		 * for level 0 will always be false.
		 *
		 * But, the disk format allows the drop_snapshot_progress
		 * field in the root to leave things in a state where
		 * a leaf will need cleaning up here.  If someone crashes
		 * with the old code and then boots with the new code,
		 * we might find a leaf here.
		 */
		if (*level == 0) {
			ret = btrfs_drop_leaf_ref(trans, root, cur);
			BUG_ON(ret);
			break;
		}

		/*
		 * once we get to level one, process the whole node
		 * at once, including everything below it.
		 */
		if (*level == 1) {
			ret = drop_level_one_refs(trans, root, path);
			BUG_ON(ret);
			break;
		}

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
		BUG_ON(ret);

		/*
		 * if there is more than one reference, we don't need
		 * to read that node to drop any references it has.  We
		 * just drop the ref we hold on that node and move on to the
		 * next slot in this level.
		 */
		if (refs != 1) {
			parent = path->nodes[*level];
			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			path->slots[*level]++;

			ret = __btrfs_free_extent(trans, root, bytenr,
						  blocksize, parent->start,
						  root_owner, root_gen,
						  *level - 1, 1);
			BUG_ON(ret);

			atomic_inc(&root->fs_info->throttle_gen);
			wake_up(&root->fs_info->transaction_throttle);
			cond_resched();

			continue;
		}

		/*
		 * we need to keep freeing things in the next level down.
		 * read the block and loop around to process it
		 */
		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	if (path->nodes[*level] == root->node) {
		parent = path->nodes[*level];
		bytenr = path->nodes[*level]->start;
	} else {
		parent = path->nodes[*level + 1];
		bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
	}

	blocksize = btrfs_level_size(root, *level);
	root_owner = btrfs_header_owner(parent);
	root_gen = btrfs_header_generation(parent);

	/*
	 * cleanup and free the reference on the last node
	 * we processed
	 */
	ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
				  parent->start, root_owner, root_gen,
				  *level, 1);
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;

	*level += 1;
	BUG_ON(ret);

	cond_resched();
	return 0;
}
/*
 * helper function for drop_subtree, this function is similar to
 * walk_down_tree. The main difference is that it checks reference
 * counts while tree blocks are locked.
 */
static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path, int *level)
{
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u64 bytenr;
	u64 ptr_gen;
	u32 blocksize;
	u32 refs;
	int ret;

	cur = path->nodes[*level];
	ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
				      &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;

	while (*level >= 0) {
		cur = path->nodes[*level];
		if (*level == 0) {
			ret = btrfs_drop_leaf_ref(trans, root, cur);
			BUG_ON(ret);
			clean_tree_block(trans, root, cur);
			break;
		}
		if (path->slots[*level] >= btrfs_header_nritems(cur)) {
			clean_tree_block(trans, root, cur);
			break;
		}

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);

		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);

		ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
					      &refs);
		BUG_ON(ret);
		if (refs > 1) {
			parent = path->nodes[*level];
			ret = btrfs_free_extent(trans, root, bytenr,
					blocksize, parent->start,
					btrfs_header_owner(parent),
					btrfs_header_generation(parent),
					*level - 1, 1);
			BUG_ON(ret);
			path->slots[*level]++;
			btrfs_tree_unlock(next);
			free_extent_buffer(next);
			continue;
		}

		*level = btrfs_header_level(next);
		path->nodes[*level] = next;
		path->slots[*level] = 0;
		path->locks[*level] = 1;
		cond_resched();
	}
out:
	parent = path->nodes[*level + 1];
	bytenr = path->nodes[*level]->start;
	blocksize = path->nodes[*level]->len;

	ret = btrfs_free_extent(trans, root, bytenr, blocksize,
				parent->start, btrfs_header_owner(parent),
				btrfs_header_generation(parent), *level, 1);
	BUG_ON(ret);

	if (path->locks[*level]) {
		btrfs_tree_unlock(path->nodes[*level]);
		path->locks[*level] = 0;
	}
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	cond_resched();
	return 0;
}
/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 int *level, int max_level)
{
	u64 root_owner;
	u64 root_gen;
	struct btrfs_root_item *root_item = &root->root_item;
	int i;
	int slot;
	int ret;

	for (i = *level; i < max_level && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
			struct extent_buffer *node;
			struct btrfs_disk_key disk_key;

			/*
			 * there is more work to do in this level.
			 * Update the drop_progress marker to reflect
			 * the work we've done so far, and then bump
			 * the slot number
			 */
			node = path->nodes[i];
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			btrfs_node_key(node, &disk_key, path->slots[i]);
			memcpy(&root_item->drop_progress,
			       &disk_key, sizeof(disk_key));
			root_item->drop_level = i;
			return 0;
		} else {
			struct extent_buffer *parent;

			/*
			 * this whole node is done, free our reference
			 * on it and go up one level
			 */
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);

			clean_tree_block(trans, root, path->nodes[*level]);
			ret = btrfs_free_extent(trans, root,
						path->nodes[*level]->start,
						path->nodes[*level]->len,
						parent->start, root_owner,
						root_gen, *level, 1);
			BUG_ON(ret);
			if (path->locks[*level]) {
				btrfs_tree_unlock(path->nodes[*level]);
				path->locks[*level] = 0;
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
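/*
 * Added commentary: walk_up_tree() returning 0 means "more work remains at
 * *level", while 1 means every node in the path is finished.  The
 * drop_progress/drop_level markers saved into the root item are what let
 * btrfs_drop_snapshot() below resume after a transaction commit or a crash
 * without repeating drops that already happened.
 */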
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			*root)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;
	struct btrfs_root_item *root_item = &root->root_item;

	WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(root->node);
	orig_level = level;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		path->nodes[level] = root->node;
		extent_buffer_get(root->node);
		path->slots[level] = 0;
	} else {
		struct btrfs_key key;
		struct btrfs_disk_key found_key;
		struct extent_buffer *node;

		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		level = root_item->drop_level;
		path->lowest_level = level;
		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		node = path->nodes[level];
		btrfs_node_key(node, &found_key, path->slots[level]);
		WARN_ON(memcmp(&found_key, &root_item->drop_progress,
			       sizeof(found_key)));
		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
			if (path->nodes[i] && path->locks[i]) {
				path->locks[i] = 0;
				btrfs_tree_unlock(path->nodes[i]);
			}
		}
	}
	while (1) {
		wret = walk_down_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_tree(trans, root, path, &level,
				    BTRFS_MAX_LEVEL);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
		if (trans->transaction->in_commit) {
			ret = -EAGAIN;
			break;
		}
		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
	}
	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_path *path;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	BUG_ON(!btrfs_tree_locked(parent));
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	BUG_ON(!btrfs_tree_locked(node));
	level = btrfs_header_level(node);
	extent_buffer_get(node);
	path->nodes[level] = node;
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_subtree(trans, root, path, &level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;

		wret = walk_up_tree(trans, root, path, &level, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	btrfs_free_path(path);
	return ret;
}
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}
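/*
 * Worked example (illustrative): calc_ra(10, 100, 32) returns
 * min(100, 10 + 32 - 1) == 41, i.e. read ahead up to one readahead window
 * past 'start' but never beyond the last interesting page.
 */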
static noinline int relocate_inode_pages(struct inode *inode, u64 start,
					 u64 len)
{
	u64 page_start;
	u64 page_end;
	unsigned long first_index;
	unsigned long last_index;
	unsigned long i;
	struct page *page;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct file_ra_state *ra;
	struct btrfs_ordered_extent *ordered;
	unsigned int total_read = 0;
	unsigned int total_dirty = 0;
	int ret = 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);

	mutex_lock(&inode->i_mutex);
	first_index = start >> PAGE_CACHE_SHIFT;
	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;

	/* make sure the dirty trick played by the caller work */
	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    first_index, last_index);
	if (ret)
		goto out_unlock;

	file_ra_state_init(ra, inode->i_mapping);

	for (i = first_index ; i <= last_index; i++) {
		if (total_read % ra->ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
				       calc_ra(i, last_index, ra->ra_pages));
		}
		total_read++;
again:
		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
			BUG_ON(1);
		page = grab_cache_page(inode->i_mapping, i);
		if (!page) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				goto out_unlock;
			}
		}
		wait_on_page_writeback(page);

		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);

		if (i == first_index)
			set_extent_bits(io_tree, page_start, page_end,
					EXTENT_BOUNDARY, GFP_NOFS);
		btrfs_set_extent_delalloc(inode, page_start, page_end);

		set_page_dirty(page);
		total_dirty++;

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
	}

out_unlock:
	kfree(ra);
	mutex_unlock(&inode->i_mutex);
	balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
	return ret;
}
static noinline int relocate_data_extent(struct inode *reloc_inode,
					 struct btrfs_key *extent_key,
					 u64 offset)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
	struct extent_map *em;
	u64 start = extent_key->objectid - offset;
	u64 end = start + extent_key->offset - 1;

	em = alloc_extent_map(GFP_NOFS);
	BUG_ON(!em || IS_ERR(em));

	em->start = start;
	em->len = extent_key->offset;
	em->block_len = extent_key->offset;
	em->block_start = extent_key->objectid;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* setup extent map to cheat btrfs_readpage */
	lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
	while (1) {
		int ret;
		spin_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		spin_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(reloc_inode, start, end, 0);
	}
	unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);

	return relocate_inode_pages(reloc_inode, start, extent_key->offset);
}
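/*
 * Added commentary: the add_extent_mapping() loop above retries on -EEXIST
 * because an overlapping cached mapping may still be present;
 * btrfs_drop_extent_cache() evicts the stale range so the pinned mapping
 * can go in.  Pointing block_start at the extent's old location is the
 * "cheat" that lets btrfs_readpage() pull the data from where it currently
 * lives while the pages are marked dirty for rewrite at the new location.
 */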
struct btrfs_ref_path {
	u64 extent_start;
	u64 nodes[BTRFS_MAX_LEVEL];
	u64 root_objectid;
	u64 root_generation;
	u64 owner_objectid;
	u32 num_refs;
	int lowest_level;
	int current_level;
	int shared_level;

	struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
	u64 new_nodes[BTRFS_MAX_LEVEL];
};

struct disk_extent {
	u64 ram_bytes;
	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 offset;
	u64 num_bytes;
	u8 compression;
	u8 encryption;
	u16 other_encoding;
};
static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}
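/*
 * Added commentary: __next_ref_path() below enumerates, one chain at a
 * time, the backref paths leading from an extent up to a tree root.
 * 'walk_up' climbs toward a root following the first backref found at each
 * level; 'walk_down' backtracks to the deepest level that still has an
 * unvisited backref.  The walk terminates early at cow-only roots (tested
 * above) and at references created by the running transaction.
 */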
static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_ref_path *ref_path,
				    int first_time)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_extent_ref *ref;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 bytenr;
	u32 nritems;
	int level;
	int ret = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (first_time) {
		ref_path->lowest_level = -1;
		ref_path->current_level = -1;
		ref_path->shared_level = -1;
		goto walk_up;
	}
walk_down:
	level = ref_path->current_level - 1;
	while (level >= -1) {
		u64 parent;
		if (level < ref_path->lowest_level)
			break;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;
		BUG_ON(bytenr == 0);

		parent = ref_path->nodes[level + 1];
		ref_path->nodes[level + 1] = 0;
		ref_path->current_level = level;
		BUG_ON(parent == 0);

		key.objectid = bytenr;
		key.offset = parent + 1;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		BUG_ON(ret == 0);

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				goto next;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid == bytenr &&
		    found_key.type == BTRFS_EXTENT_REF_KEY) {
			if (level < ref_path->shared_level)
				ref_path->shared_level = level;
			goto found;
		}
next:
		level--;
		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached lowest level */
	ret = 1;
	goto out;
walk_up:
	level = ref_path->current_level;
	while (level < BTRFS_MAX_LEVEL - 1) {
		u64 ref_objectid;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;

		BUG_ON(bytenr == 0);

		key.objectid = bytenr;
		key.offset = 0;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* the extent was freed by someone */
				if (ref_path->lowest_level == level)
					goto out;
				btrfs_release_path(extent_root, path);
				goto walk_down;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr ||
		    found_key.type != BTRFS_EXTENT_REF_KEY) {
			/* the extent was freed by someone */
			if (ref_path->lowest_level == level) {
				ret = 1;
				goto out;
			}
			btrfs_release_path(extent_root, path);
			goto walk_down;
		}
found:
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		ref_objectid = btrfs_ref_objectid(leaf, ref);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
			if (first_time) {
				level = (int)ref_objectid;
				BUG_ON(level >= BTRFS_MAX_LEVEL);
				ref_path->lowest_level = level;
				ref_path->current_level = level;
				ref_path->nodes[level] = bytenr;
			} else {
				WARN_ON(ref_objectid != level);
			}
		} else {
			WARN_ON(level != -1);
		}
		first_time = 0;

		if (ref_path->lowest_level == level) {
			ref_path->owner_objectid = ref_objectid;
			ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
		}

		/*
		 * the block is tree root or the block isn't in reference
		 * counted tree.
		 */
		if (found_key.objectid == found_key.offset ||
		    is_cowonly_root(btrfs_ref_root(leaf, ref))) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			if (level < 0) {
				/* special reference from the tree log */
				ref_path->nodes[0] = found_key.offset;
				ref_path->current_level = 0;
			}
			ret = 0;
			goto out;
		}

		level++;
		BUG_ON(ref_path->nodes[level] != 0);
		ref_path->nodes[level] = found_key.offset;
		ref_path->current_level = level;

		/*
		 * the reference was created in the running transaction,
		 * no need to continue walking up.
		 */
		if (btrfs_ref_generation(leaf, ref) == trans->transid) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			ret = 0;
			goto out;
		}

		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached max tree level, but no tree root found. */
	BUG();
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct btrfs_ref_path *ref_path,
				u64 extent_start)
{
	memset(ref_path, 0, sizeof(*ref_path));
	ref_path->extent_start = extent_start;

	return __next_ref_path(trans, extent_root, ref_path, 1);
}
static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct btrfs_ref_path *ref_path)
{
	return __next_ref_path(trans, extent_root, ref_path, 0);
}
static noinline int get_new_locations(struct inode *reloc_inode,
				      struct btrfs_key *extent_key,
				      u64 offset, int no_fragment,
				      struct disk_extent **extents,
				      int *nr_extents)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct disk_extent *exts = *extents;
	struct btrfs_key found_key;
	u64 cur_pos;
	u64 last_byte;
	u32 nritems;
	int nr = 0;
	int max = *nr_extents;
	int ret;

	WARN_ON(!no_fragment && *extents);
	if (!exts) {
		max = 1;
		exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
		if (!exts)
			return -ENOMEM;
	}

	path = btrfs_alloc_path();
	BUG_ON(!path);

	cur_pos = extent_key->objectid - offset;
	last_byte = extent_key->objectid + extent_key->offset;
	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
				       cur_pos, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.offset != cur_pos ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY ||
		    found_key.objectid != reloc_inode->i_ino)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG ||
		    btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			break;

		if (nr == max) {
			struct disk_extent *old = exts;
			max *= 2;
			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
			memcpy(exts, old, sizeof(*exts) * nr);
			if (old != *extents)
				kfree(old);
		}

		exts[nr].disk_bytenr =
			btrfs_file_extent_disk_bytenr(leaf, fi);
		exts[nr].disk_num_bytes =
			btrfs_file_extent_disk_num_bytes(leaf, fi);
		exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
		exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
		exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
		exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
									   fi);
		BUG_ON(exts[nr].offset > 0);
		BUG_ON(exts[nr].compression || exts[nr].encryption);
		BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);

		cur_pos += exts[nr].num_bytes;
		nr++;

		if (cur_pos + offset >= last_byte)
			break;

		if (no_fragment) {
			ret = 1;
			goto out;
		}
		path->slots[0]++;
	}

	BUG_ON(cur_pos + offset > last_byte);
	if (cur_pos + offset < last_byte) {
		ret = -ENOENT;
		goto out;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (ret) {
		if (exts != *extents)
			kfree(exts);
	} else {
		*extents = exts;
		*nr_extents = nr;
	}
	return ret;
}
static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_key *leaf_key,
					struct btrfs_ref_path *ref_path,
					struct disk_extent *new_extents,
					int nr_extents)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	struct btrfs_key key;
	u64 lock_start = 0;
	u64 lock_end = 0;
	u64 num_bytes;
	u64 ext_offset;
	u64 search_end = (u64)-1;
	u32 nritems;
	int nr_scaned = 0;
	int extent_locked = 0;
	int extent_type;
	int ret;

	memcpy(&key, leaf_key, sizeof(key));
	if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
		if (key.objectid < ref_path->owner_objectid ||
		    (key.objectid == ref_path->owner_objectid &&
		     key.type < BTRFS_EXTENT_DATA_KEY)) {
			key.objectid = ref_path->owner_objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = 0;
		}
	}

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
next:
		if (extent_locked && ret > 0) {
			/*
			 * the file extent item was modified by someone
			 * before the extent got locked.
			 */
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}

		if (path->slots[0] >= nritems) {
			if (++nr_scaned > 2)
				break;

			BUG_ON(extent_locked);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
			if ((key.objectid > ref_path->owner_objectid) ||
			    (key.objectid == ref_path->owner_objectid &&
			     key.type > BTRFS_EXTENT_DATA_KEY) ||
			    key.offset >= search_end)
				break;
		}

		if (inode && key.objectid != inode->i_ino) {
			BUG_ON(extent_locked);
			btrfs_release_path(root, path);
			mutex_unlock(&inode->i_mutex);
			iput(inode);
			inode = NULL;
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		if ((extent_type != BTRFS_FILE_EXTENT_REG &&
		     extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
		    (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		     extent_key->objectid)) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}

		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		ext_offset = btrfs_file_extent_offset(leaf, fi);

		if (search_end == (u64)-1) {
			search_end = key.offset - ext_offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		}

		if (!extent_locked) {
			lock_start = key.offset;
			lock_end = lock_start + num_bytes - 1;
		} else {
			if (lock_start > key.offset ||
			    lock_end + 1 < key.offset + num_bytes) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				extent_locked = 0;
			}
		}

		if (!inode) {
			btrfs_release_path(root, path);

			inode = btrfs_iget_locked(root->fs_info->sb,
						  key.objectid, root);
			if (inode->i_state & I_NEW) {
				BTRFS_I(inode)->root = root;
				BTRFS_I(inode)->location.objectid =
					key.objectid;
				BTRFS_I(inode)->location.type =
					BTRFS_INODE_ITEM_KEY;
				BTRFS_I(inode)->location.offset = 0;
				btrfs_read_locked_inode(inode);
				unlock_new_inode(inode);
			}
			/*
			 * some code calls btrfs_commit_transaction while
			 * holding the i_mutex, so we can't use mutex_lock
			 * here.
			 */
			if (is_bad_inode(inode) ||
			    !mutex_trylock(&inode->i_mutex)) {
				iput(inode);
				inode = NULL;
				key.offset = (u64)-1;
				goto skip;
			}
		}

		if (!extent_locked) {
			struct btrfs_ordered_extent *ordered;

			btrfs_release_path(root, path);

			lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				    lock_end, GFP_NOFS);
			ordered = btrfs_lookup_first_ordered_extent(inode,
								    lock_end);
			if (ordered &&
			    ordered->file_offset <= lock_end &&
			    ordered->file_offset + ordered->len > lock_start) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				btrfs_start_ordered_extent(inode, ordered, 1);
				btrfs_put_ordered_extent(ordered);
				key.offset += num_bytes;
				goto skip;
			}
			if (ordered)
				btrfs_put_ordered_extent(ordered);

			extent_locked = 1;
			continue;
		}

		if (nr_extents == 1) {
			/* update extent pointer in place */
			btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[0].disk_bytenr);
			btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[0].disk_num_bytes);
			btrfs_mark_buffer_dirty(leaf);

			btrfs_drop_extent_cache(inode, key.offset,
						key.offset + num_bytes - 1, 0);

			ret = btrfs_inc_extent_ref(trans, root,
						new_extents[0].disk_bytenr,
						new_extents[0].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid,
						key.objectid);
			BUG_ON(ret);

			ret = btrfs_free_extent(trans, root,
						extent_key->objectid,
						extent_key->offset,
						leaf->start,
						btrfs_header_owner(leaf),
						btrfs_header_generation(leaf),
						key.objectid, 0);
			BUG_ON(ret);

			btrfs_release_path(root, path);
			key.offset += num_bytes;
		} else {
			BUG_ON(1);
#if 0
			u64 alloc_hint;
			u64 extent_len;
			int i;
			/*
			 * drop old extent pointer at first, then insert the
			 * new pointers one by one
			 */
			btrfs_release_path(root, path);
			ret = btrfs_drop_extents(trans, root, inode, key.offset,
						 key.offset + num_bytes,
						 key.offset, &alloc_hint);
			BUG_ON(ret);

			for (i = 0; i < nr_extents; i++) {
				if (ext_offset >= new_extents[i].num_bytes) {
					ext_offset -= new_extents[i].num_bytes;
					continue;
				}
				extent_len = min(new_extents[i].num_bytes -
						 ext_offset, num_bytes);

				ret = btrfs_insert_empty_item(trans, root,
							      path, &key,
							      sizeof(*fi));
				BUG_ON(ret);

				leaf = path->nodes[0];
				fi = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
				btrfs_set_file_extent_generation(leaf, fi,
							trans->transid);
				btrfs_set_file_extent_type(leaf, fi,
							BTRFS_FILE_EXTENT_REG);
				btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[i].disk_bytenr);
				btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[i].disk_num_bytes);
				btrfs_set_file_extent_ram_bytes(leaf, fi,
						new_extents[i].ram_bytes);

				btrfs_set_file_extent_compression(leaf, fi,
						new_extents[i].compression);
				btrfs_set_file_extent_encryption(leaf, fi,
						new_extents[i].encryption);
				btrfs_set_file_extent_other_encoding(leaf, fi,
						new_extents[i].other_encoding);

				btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_len);
				ext_offset += new_extents[i].offset;
				btrfs_set_file_extent_offset(leaf, fi,
							ext_offset);
				btrfs_mark_buffer_dirty(leaf);

				btrfs_drop_extent_cache(inode, key.offset,
						key.offset + extent_len - 1, 0);

				ret = btrfs_inc_extent_ref(trans, root,
						new_extents[i].disk_bytenr,
						new_extents[i].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid, key.objectid);
				BUG_ON(ret);
				btrfs_release_path(root, path);

				inode_add_bytes(inode, extent_len);

				ext_offset = 0;
				num_bytes -= extent_len;
				key.offset += extent_len;

				if (num_bytes == 0)
					break;
			}
			BUG_ON(i >= nr_extents);
#endif
		}

		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}
skip:
		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
		    key.offset >= search_end)
			break;

		cond_resched();
	}
	ret = 0;
out:
	btrfs_release_path(root, path);
	if (inode) {
		mutex_unlock(&inode->i_mutex);
		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
		}
		iput(inode);
	}
	return ret;
}

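/*
 * a leaf COWed into a reloc tree keeps the extent references of the
 * original leaf.  Duplicate the cached leaf ref of the original so the
 * new leaf has an up to date copy of its own.
 */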
int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf, u64 orig_start)
{
	int level;
	int ret;

	BUG_ON(btrfs_header_generation(buf) != trans->transid);
	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	level = btrfs_header_level(buf);
	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_leaf_ref *orig_ref;

		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
		if (!orig_ref)
			return -ENOENT;

		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
		if (!ref) {
			btrfs_free_leaf_ref(root, orig_ref);
			return -ENOMEM;
		}

		ref->nritems = orig_ref->nritems;
		memcpy(ref->extents, orig_ref->extents,
		       sizeof(ref->extents[0]) * ref->nritems);

		btrfs_free_leaf_ref(root, orig_ref);

		ref->root_gen = trans->transid;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);

		ret = btrfs_add_leaf_ref(root, ref, 0);
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
	return 0;
}

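/*
 * drop the cached extent mappings for every file extent item in 'leaf'
 * whose inode is present in 'target_root', so stale mappings are not
 * used once the underlying extents have been moved.
 */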
static noinline int invalidate_extent_cache(struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct btrfs_root *target_root)
{
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_file_extent_item *fi;
	u64 num_bytes;
	u64 skip_objectid = 0;
	u32 nritems;
	u32 i;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid == skip_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			continue;
		if (!inode || inode->i_ino != key.objectid) {
			iput(inode);
			inode = btrfs_ilookup(target_root->fs_info->sb,
					      key.objectid, target_root, 1);
		}
		if (!inode) {
			skip_objectid = key.objectid;
			continue;
		}
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);

		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
			    key.offset + num_bytes - 1, GFP_NOFS);
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
			      key.offset + num_bytes - 1, GFP_NOFS);
		cond_resched();
	}
	iput(inode);
	return 0;
}

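/*
 * rewrite the file extent items in 'leaf' that sit inside the block
 * group being relocated so they point at the new extent locations
 * found through the relocation inode, keeping the cached leaf ref in
 * sync with the updated pointers.
 */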
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode)
{
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;
	u64 bytenr;
	u64 num_bytes;
	u32 nritems;
	u32 i;
	int ext_index;
	int nr_extent;
	int ret;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);
	BUG_ON(!ref);

	ext_index = -1;
	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;

		ext_index++;
		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)
			continue;

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		nr_extent = 1;
		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);
		if (ret > 0)
			continue;
		BUG_ON(ret < 0);

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					new_extent->disk_bytenr,
					new_extent->disk_num_bytes,
					leaf->start,
					root->root_key.objectid,
					trans->transid, key.objectid);
		BUG_ON(ret);
		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),
					key.objectid, 0);
		BUG_ON(ret);
		cond_resched();
	}
	kfree(new_extent);
	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
	return 0;
}

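/*
 * detach the reloc tree from its subvol root and queue it on the
 * fs_info list of dead reloc roots; the tree itself is dropped later
 * by btrfs_drop_dead_reloc_roots().
 */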
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&reloc_root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
			sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

		ret = btrfs_update_root(trans, root->fs_info->tree_root,
					&reloc_root->root_key,
					&reloc_root->root_item);
		BUG_ON(ret);
	}
	return 0;
}

int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;
	int ret;
	unsigned long nr;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);
		while (1) {
			trans = btrfs_join_transaction(root, 1);
			BUG_ON(!trans);

			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, reloc_root);
			if (ret != -EAGAIN)
				break;
			mutex_unlock(&root->fs_info->drop_mutex);

			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, root);
			BUG_ON(ret);
			btrfs_btree_balance_dirty(root, nr);
		}

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
		btrfs_btree_balance_dirty(root, nr);

		kfree(prev_root);
		prev_root = reloc_root;
	}
	if (prev_root) {
		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
		kfree(prev_root);
	}
	return 0;
}

int btrfs_add_dead_reloc_root(struct btrfs_root *root)
{
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
	return 0;
}

int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;
	int found;
	int ret;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
	BUG_ON(ret);
	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	if (found) {
		trans = btrfs_start_transaction(root, 1);
		BUG_ON(!trans);
		ret = btrfs_commit_transaction(trans, root);
		BUG_ON(ret);
	}

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
	return 0;
}

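/*
 * create the reloc tree for 'root' by snapshotting the latest
 * committed root.  All reloc trees use BTRFS_TREE_RELOC_OBJECTID as
 * the key objectid; the objectid of the subvol root is kept in the key
 * offset to tell them apart.
 */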
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)
		return 0;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(ret);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	/* copy the whole root item, not just a pointer's worth of it */
	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
	return 0;
}

/*
 * Core function of space balance.
 *
 * The idea is using reloc trees to relocate tree blocks in reference
 * counted roots.  There is one reloc tree for each subvol, and all
 * reloc trees share same root key objectid.  Reloc trees are snapshots
 * of the latest committed roots of subvols (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps.
 * COW the block through subvol's reloc tree, then update block pointer
 * in the subvol to point to the new block.  Since all reloc trees share
 * same root key objectid, doing special handling for tree blocks owned
 * by them is easy.  Once a tree block has been COWed in one reloc tree,
 * we can use the resulting new block directly when the same block is
 * required to COW again through other reloc trees.  By this way, relocated
 * tree blocks are shared between reloc trees, so they are also shared
 * between subvols.
 */
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;
	u64 *nodes;
	int level;
	int shared_level;
	int lowest_level = 0;
	int ret;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
		BUG_ON(ret < 0);
		path->lowest_level = 0;
		btrfs_release_path(root, path);
		return 0;
	}

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);
	BUG_ON(ret);
	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)
				break;
			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);
		}
		if (nodes[0] &&
		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);
			BUG_ON(ret);
		}
		btrfs_release_path(reloc_root, path);
	} else {
		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
				       lowest_level);
		BUG_ON(ret);
	}

	/*
	 * replace tree blocks in the fs tree with tree blocks in
	 * the reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
	BUG_ON(ret < 0);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 0);
		BUG_ON(ret);
		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);
		BUG_ON(ret);
		free_extent_buffer(eb);
	}

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
	return 0;
}

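/*
 * relocate a single tree block.  This is a thin wrapper around
 * relocate_one_path() for metadata-only reference paths.
 */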
static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
{
	int ret;

	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);
	BUG_ON(ret);

	if (root == root->fs_info->extent_root)
		btrfs_extent_post_op(trans, root);

	return 0;
}

static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;

	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_release_path(extent_root, path);
	return ret;
}

static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
}

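/*
 * relocate one extent by walking every reference path leading to it.
 * Data extents are copied to the relocation inode in pass 0 and their
 * references updated in later passes; tree blocks are moved through
 * the reloc trees.
 */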
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;
	int nr_extents = 0;
	int loops;
	int ret;
	int level;
	struct btrfs_key first_key;
	u64 prev_block = 0;

	trans = btrfs_start_transaction(extent_root, 1);
	BUG_ON(!trans);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);
		goto out;
	}

	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
	if (!ref_path) {
		ret = -ENOMEM;
		goto out;
	}

	for (loops = 0; ; loops++) {
		if (loops == 0) {
			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);
		} else {
			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
		}
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
			continue;

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted tree, only process reference paths
		 * rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)
			continue;

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			if (pass == 0) {
				/*
				 * copy data extents to new locations
				 */
				u64 group_start = group->key.objectid;
				ret = relocate_data_extent(reloc_inode,
							   extent_key,
							   group_start);
				if (ret < 0)
					goto out;
				break;
			}
			level = 0;
		} else {
			level = ref_path->owner_objectid;
		}

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,
					     block_size, 0);
			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;
		}

		btrfs_record_root_in_trans(found_root);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			if (pass == 1) {
				ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);
				if (ret < 0)
					goto out;
				continue;
			}
			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			if (!new_extents) {
				u64 group_start = group->key.objectid;
				new_extents = kmalloc(sizeof(*new_extents),
						      GFP_NOFS);
				nr_extents = 1;
				ret = get_new_locations(reloc_inode,
							extent_key,
							group_start, 1,
							&new_extents,
							&nr_extents);
				if (ret)
					goto out;
			}
			ret = replace_one_extent(trans, found_root,
						path, extent_key,
						&first_key, ref_path,
						new_extents, nr_extents);
		} else {
			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, extent_root);
	kfree(new_extents);
	kfree(ref_path);
	return ret;
}

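/*
 * pick the allocation profile for the chunks that replace the ones
 * being relocated, converting the current profile to one appropriate
 * for the number of writeable devices.
 */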
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->rw_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}

static int __alloc_chunk_for_shrink(struct btrfs_root *root,
		     struct btrfs_block_group_cache *shrink_block_group,
		     int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
		spin_unlock(&shrink_block_group->lock);

		trans = btrfs_start_transaction(root, 1);
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		spin_unlock(&shrink_block_group->lock);

		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		btrfs_end_transaction(trans, root);
	} else
		spin_unlock(&shrink_block_group->lock);
	return 0;
}

static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 u64 objectid, u64 size)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, size);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
out:
	btrfs_free_path(path);
	return ret;
}

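/*
 * create an orphan inode in the data relocation tree covering the same
 * number of bytes as the block group being relocated.  Relocated data
 * is staged in this inode before it is written out to its new home.
 */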
static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
	int err = 0;

	root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
	BUG_ON(err);

	err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
				       group->key.offset, 0, group->key.offset,
				       0, 0, 0);
	BUG_ON(err);

	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		BTRFS_I(inode)->location.objectid = objectid;
		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
		BTRFS_I(inode)->location.offset = 0;
		btrfs_read_locked_inode(inode);
		unlock_new_inode(inode);
		BUG_ON(is_bad_inode(inode));
	} else {
		BUG_ON(1);
	}
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, inode);
out:
	btrfs_end_transaction(trans, root);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}

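/*
 * the data in the relocation inode was checksummed at its original
 * disk location.  Look those checksums up and rebase their disk
 * bytenrs onto the ordered extent that writes the new location.
 */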
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_sector_sum *sector_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct list_head list;
	size_t offset;
	int ret;
	u64 disk_bytenr;

	INIT_LIST_HEAD(&list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list);
	BUG_ON(ret);

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		sector_sum = sums->sums;
		sums->bytenr = ordered->start;

		offset = 0;
		while (offset < sums->len) {
			sector_sum->bytenr += ordered->start - disk_bytenr;
			sector_sum++;
			offset += root->sectorsize;
		}

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
	btrfs_put_ordered_extent(ordered);
	return 0;
}

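/*
 * relocate all allocated extents in a block group.  The group is made
 * read only and flushed, then the extent tree is scanned pass by pass,
 * relocating extents until nothing in the range remains in use.
 */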
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct extent_buffer *leaf;
	struct inode *reloc_inode;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_key key;
	u64 skipped;
	u64 cur_byte;
	u64 total_found;
	u32 nritems;
	int ret;
	int progress = 0;
	int pass = 0;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(info, group_start);
	BUG_ON(!block_group);

	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)block_group->key.objectid,
	       (unsigned long long)block_group->flags);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	reloc_inode = create_reloc_inode(info, block_group);
	BUG_ON(IS_ERR(reloc_inode));

	__alloc_chunk_for_shrink(root, block_group, 1);
	set_block_group_readonly(block_group);

	btrfs_start_delalloc_inodes(info->tree_root);
	btrfs_wait_ordered_extents(info->tree_root, 0);
again:
	skipped = 0;
	total_found = 0;
	key.objectid = block_group->key.objectid;
	key.offset = 0;
	key.type = 0;
	cur_byte = key.objectid;

	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_clean_old_snapshots(info->tree_root);
	btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
next:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (progress && need_resched()) {
			btrfs_release_path(root, path);
			cond_resched();
			progress = 0;
			continue;
		}
		progress = 1;

		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
		    key.objectid + key.offset <= cur_byte) {
			path->slots[0]++;
			goto next;
		}

		total_found++;
		cur_byte = key.objectid + key.offset;
		btrfs_release_path(root, path);

		__alloc_chunk_for_shrink(root, block_group, 0);
		ret = relocate_one_extent(root, path, &key, block_group,
					  reloc_inode, pass);
		BUG_ON(ret < 0);
		if (ret > 0)
			skipped++;

		key.objectid = cur_byte;
		key.type = 0;
		key.offset = 0;
	}

	btrfs_release_path(root, path);

	if (pass == 0) {
		btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
		invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
	}

	if (total_found > 0) {
		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
		       (unsigned long long)total_found, pass);
		pass++;
		if (total_found == skipped && pass > 2) {
			iput(reloc_inode);
			reloc_inode = create_reloc_inode(info, block_group);
			pass = 0;
		}
		goto again;
	}

	/* delete reloc_inode */
	iput(reloc_inode);

	/* unpin extents in this range */
	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	spin_lock(&block_group->lock);
	WARN_ON(block_group->pinned > 0);
	WARN_ON(block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
	spin_unlock(&block_group->lock);
	put_block_group(block_group);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
	ret = -ENOENT;
out:
	return ret;
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct rb_node *n;

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		btrfs_remove_free_space_cache(block_group);
		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		WARN_ON(atomic_read(&block_group->count) != 1);
		kfree(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);
	return 0;
}

int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0) {
			ret = 0;
			goto error;
		}
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			break;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		mutex_init(&cache->alloc_mutex);
		mutex_init(&cache->cache_mutex);
		INIT_LIST_HEAD(&cache->list);
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		down_write(&space_info->groups_sem);
		list_add_tail(&cache->list, &space_info->block_groups);
		up_write(&space_info->groups_sem);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_readonly(cache);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

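/*
 * create a new block group for the given chunk and link it into the
 * space info accounting and the block group cache.
 */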
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_new_blockgroup = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	mutex_init(&cache->alloc_mutex);
	mutex_init(&cache->cache_mutex);
	INIT_LIST_HEAD(&cache->list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);
	down_write(&cache->space_info->groups_sem);
	list_add_tail(&cache->list, &cache->space_info->block_groups);
	up_write(&cache->space_info->groups_sem);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	finish_current_insert(trans, extent_root, 0);
	ret = del_pending_extents(trans, extent_root, 0);
	BUG_ON(ret);
	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_key key;
	int ret;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));

	path = btrfs_alloc_path();
	BUG_ON(!path);

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);
	btrfs_remove_free_space_cache(block_group);
	down_write(&block_group->space_info->groups_sem);
	list_del(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);
	block_group->space_info->full = 0;

	put_block_group(block_group);
	put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}