/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key;
	int level;
	int count;
	u64 parent;
	u64 wanted_disk_byte;
};
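
/*
 * allocate a preliminary ref for one backref and append it to the list.
 * duplicates are merged and indirect refs are resolved to parents later on.
 */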
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level, u64 parent,
			    u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key = *key;
	else
		memset(&ref->key, 0, sizeof(ref->key));

	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}
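
/*
 * add the start address of eb to the parents ulist. for leaves, also walk
 * the following leaves as long as they still contain EXTENT_DATA items
 * referencing wanted_disk_byte and add those leaves as well.
 */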
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct extent_buffer *eb, int level,
			   u64 wanted_objectid, u64 wanted_disk_byte)
{
	int ret;
	int slot;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 disk_byte;

add_parent:
	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	if (ret < 0)
		return ret;

	if (level != 0)
		return 0;

	/*
	 * if the current leaf is full with EXTENT_DATA items, we must
	 * check the next one if that holds a reference as well.
	 * ref->count cannot be used to skip this check.
	 * repeat this until we don't find any additional EXTENT_DATA items.
	 */
	while (1) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		eb = path->nodes[0];
		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
			btrfs_item_key_to_cpu(eb, &key, slot);
			if (key.objectid != wanted_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY)
				return 0;
			fi = btrfs_item_ptr(eb, slot,
					    struct btrfs_file_extent_item);
			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
			if (disk_byte == wanted_disk_byte)
				goto add_parent;
		}
	}

	return 0;
}
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct __prelim_ref *ref,
				  struct ulist *parents)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	root_level = btrfs_header_level(root->node);

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key.objectid, ref->key.type,
		 (unsigned long long)ref->key.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	if (!eb) {
		ret = 1;
		goto out;
	}

	if (level == 0) {
		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
	}

	/* the last two parameters will only be used for level == 0 */
	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
			      ref->wanted_disk_byte);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct list_head *head)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, ref, parents);
		if (err) {
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		node = ulist_next(parents, NULL);
		ref->parent = node ? node->val : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, node))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}
/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		if (mode == 1 && ref1->key.type == 0)
			continue;
		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (memcmp(&ref1->key, &ref2->key,
					   sizeof(ref1->key)) ||
				    ref1->level != ref2->level ||
				    ref1->root_id != ref2->root_id)
					continue;
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}
	}
	return 0;
}
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct btrfs_key *info_key,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(info_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* normal indirect metadata backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* shared direct metadata backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* normal indirect data backref */
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* shared direct full backref */
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}
/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     struct btrfs_key *info_key, int *info_level,
			     struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0] - 1;

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;
		struct btrfs_disk_key disk_key;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		btrfs_tree_block_key(leaf, info, &disk_key);
		btrfs_disk_key_to_cpu(info_key, &disk_key);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       *info_level + 1, offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, info_key,
					       *info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
					       count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    struct btrfs_key *info_key, int info_level,
			    struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       info_level + 1, key.offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, info_key,
					       info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 seq, struct ulist *refs, struct ulist *roots)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_key info_key = { 0 };
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	/*
	 * look if there are updates for this ref queued and lock the head
	 */
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's
			 * released and try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
		if (ret) {
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
	}
	spin_unlock(&delayed_refs->lock);

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		leaf = path->nodes[0];
		slot = path->slots[0] - 1;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_key, &info_level,
						&prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	/*
	 * when adding the delayed refs above, the info_key might not have
	 * been known yet. Go over the list and replace the missing keys
	 */
	list_for_each_entry(ref, &prefs_delayed, list) {
		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
			memcpy(&ref->key, &info_key, sizeof(ref->key));
	}
	list_splice_init(&prefs_delayed, &prefs);

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	if (head)
		mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}
/*
 * Finds all leaves with a reference to the specified combination of bytenr
 * and offset. The leaves will be stored in the leafs ulist, which must be
 * freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 num_bytes, u64 seq, struct ulist **leafs)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		ulist_free(*leafs);
		return ret;
	}

	return 0;
}
/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
					tmp, *roots);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, node);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);

	return 0;
}
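
/*
 * helper for inode_item_info and inode_ref_info: search fs_root for the key
 * (inum, key_type, ioff) and return the key actually found in found_key
 */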
static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}
/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
		    struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
			    &key);
}
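
/*
 * this makes the path point to (inum INODE_REF ioff)
 */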
static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			  struct btrfs_path *path,
			  struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
			    found_key);
}
/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			  struct btrfs_inode_ref *iref,
			  struct extent_buffer *eb_in, u64 parent,
			  char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   (unsigned long)(iref + 1), len);
		if (eb != eb_in)
			free_extent_buffer(eb);
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in)
			atomic_inc(&eb->refs);
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
				  0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
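
/*
 * read the leaf at 'logical' and call iterate() for every EXTENT_DATA item
 * in it that references the extent item at orig_extent_item_objectid and
 * covers extent_item_pos
 */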
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 logical,
			     u64 orig_extent_item_objectid,
			     u64 extent_item_pos, u64 root,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int extent_type;
	int ret = 0;
	u64 data_offset;
	u64 data_len;

	eb = read_tree_block(fs_info->tree_root, logical,
			     fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid)
			continue;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			continue;

		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", orig_extent_item_objectid,
			 key.objectid, key.offset, root);
		ret = iterate(key.objectid,
			      key.offset + (extent_item_pos - data_offset),
			      root, ctx);
		if (ret) {
			pr_debug("stopping iteration because ret=%d\n", ret);
			break;
		}
	}

	free_extent_buffer(eb);

	return ret;
}
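
/*
 * For reference, a minimal iterate_extent_inodes_t callback as consumed by
 * iterate_leaf_refs() and iterate_extent_inodes() could look like the sketch
 * below. The callback name and the counter passed via ctx are made up for
 * illustration; only the parameter meaning (inode number, file offset that
 * maps to the queried extent position, root id) follows from this file.
 * Returning a non-zero value stops the iteration.
 *
 *	static int count_extent_users(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *counter = ctx;
 *
 *		pr_debug("extent used by ino %llu at offset %llu in root %llu\n",
 *			 inum, offset, root);
 *		++*counter;
 *		return 0;
 *	}
 */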
/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 * path is guaranteed to be in released state when iterate() is called.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs;
	struct ulist *roots;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list seq_elem;
	struct btrfs_delayed_ref_root *delayed_refs;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	btrfs_get_delayed_seq(delayed_refs, &seq_elem);
	spin_unlock(&delayed_refs->lock);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   extent_item_pos, seq_elem.seq,
				   &refs);
	if (ret)
		goto out;

	while (!ret && (ref_node = ulist_next(refs, ref_node))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
					   seq_elem.seq, &roots);
		if (ret)
			break;
		while (!ret && (root_node = ulist_next(roots, root_node))) {
			pr_debug("root %llu references leaf %llu\n",
				 root_node->val, ref_node->val);
			ret = iterate_leaf_refs(fs_info, path, ref_node->val,
						extent_item_objectid,
						extent_item_pos,
						root_node->val, iterate, ctx);
		}
		ulist_free(roots);
		roots = NULL;
	}

	ulist_free(refs);
out:
	btrfs_put_delayed_seq(delayed_refs, &seq_elem);
	btrfs_end_transaction(trans, fs_info->extent_root);
	return ret;
}
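
/*
 * resolve the extent item that covers the given logical address and call
 * iterate() for every inode that references it. tree blocks cannot be
 * resolved to inodes and are rejected.
 */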
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	btrfs_release_path(path);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
				    extent_item_pos, iterate, ctx);

	return ret;
}
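
/*
 * iterate all INODE_REF items of the given inode, i.e. all names the inode
 * is known by, and call iterate() once per name with the path released
 */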
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path,
			 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (1) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret) {
				free_extent_buffer(eb);
				break;
			}
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
			      inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
/*
 * allocates a btrfs_data_container of total_bytes. note that the space usable
 * for actual data is total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with kfree() in the end.
 */
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}
void free_ipath(struct inode_fs_paths *ipath)
{
	kfree(ipath->fspath);
	kfree(ipath);
}
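
/*
 * Typical use of the ipath helpers, as a rough sketch only (error handling is
 * abbreviated and the 4096-byte buffer size is an arbitrary choice made for
 * this example, not something this file requires):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath = init_ipath(4096, fs_root, path);
 *
 *	if (!IS_ERR(ipath) && paths_from_inode(inum, ipath) == 0) {
 *		int i;
 *		for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 *			pr_debug("path %d: %s\n", i,
 *				 (char *)(unsigned long)ipath->fspath->val[i]);
 *	}
 *	if (!IS_ERR(ipath))
 *		free_ipath(ipath);
 *	btrfs_free_path(path);
 */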