linux-2.6/libata-dev.git: fs/btrfs/backref.c
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
				struct btrfs_file_extent_item *fi,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 data_offset;
	u64 data_len;
	struct extent_inode_elem *e;

	data_offset = btrfs_file_extent_offset(eb, fi);
	data_len = btrfs_file_extent_num_bytes(eb, fi);

	if (extent_item_pos < data_offset ||
	    extent_item_pos >= data_offset + data_len)
		return 1;

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + (extent_item_pos - data_offset);
	*eie = e;

	return 0;
}

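/*
 * Worked example for the range check above (illustrative numbers only): if
 * a file extent item has btrfs_file_extent_offset() == 4096 and
 * btrfs_file_extent_num_bytes() == 8192, only extent_item_pos values in
 * [4096, 12288) belong to it. For extent_item_pos == 6144, the matching
 * file offset recorded in *eie is key->offset + (6144 - 4096).
 */
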
static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
				u64 extent_item_pos,
				struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */

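/*
 * Example reading of the tables above (not an exhaustive description): an
 * inline BTRFS_TREE_BLOCK_REF_KEY backref is an "indirect tree" ref - we
 * know the root and the tree block logical, but not the parent, so
 * __add_missing_keys() later fetches the block's first key and
 * __resolve_indirect_ref() walks down from that root to find the parent.
 * A BTRFS_SHARED_BLOCK_REF_KEY backref is a "shared tree" ref - its offset
 * already is the parent logical and nothing needs to be resolved.
 */
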
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
				struct ulist *parents, int level,
				struct btrfs_key *key_for_search, u64 time_seq,
				u64 wanted_disk_byte,
				const u64 *extent_item_pos)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL;
	u64 disk_byte;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (!ret) {
				ret = ulist_add(parents, eb->start,
						(unsigned long)eie, GFP_NOFS);
				if (ret < 0)
					break;
				if (!extent_item_pos) {
					ret = btrfs_next_old_leaf(root, path,
							time_seq);
					continue;
				}
			}
		}
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
					int search_commit_root,
					u64 time_seq,
					struct __prelim_ref *ref,
					struct ulist *parents,
					const u64 *extent_item_pos)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	rcu_read_lock();
	root_level = btrfs_header_level(root->node);
	rcu_read_unlock();

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key_for_search.objectid,
		 ref->key_for_search.type,
		 (unsigned long long)ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (!level) {
			WARN_ON(1);
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
				time_seq, ref->wanted_disk_byte,
				extent_item_pos);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   int search_commit_root, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, search_commit_root,
					     time_seq, ref, parents,
					     extent_item_pos);
		if (err) {
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list =
			node ? (struct extent_inode_elem *)node->aux : NULL;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list =
					(struct extent_inode_elem *)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}

static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		BUG_ON(!eb);
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by those
 *           having a parent).
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}
	}
	return 0;
}

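/*
 * Merge example (hypothetical list contents): in mode 1, two indirect refs
 * for the same block with identical (root_id, key, level) and count 1 each
 * collapse into a single ref with count 2; if only one of the pair already
 * carries a parent, the exchange above makes sure that one survives. In
 * mode 2, refs that resolved to the same parent are combined the same way.
 */
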
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 delayed_ref_seq, u64 time_seq,
			     struct ulist *refs, struct ulist *roots,
			     const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, delayed_ref_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
				      &prefs, extent_item_pos);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		if (ref->count < 0)
			WARN_ON(1);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							info_level);
				eb = read_tree_block(fs_info->extent_root,
							ref->parent, bsz, 0);
				BUG_ON(!eb);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				ref->inode_list = eie;
				free_extent_buffer(eb);
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (unsigned long)ref->inode_list,
					      (unsigned long *)&eie, GFP_NOFS);
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

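/*
 * Shape of the result (as built above): every node in @refs holds a parent
 * block's logical address in node->val and, if @extent_item_pos was given,
 * a chain of struct extent_inode_elem hanging off node->aux that names the
 * inodes and file offsets using the extent through that block.
 * free_leaf_list() below releases both.
 */
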
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct extent_inode_elem *eie_next;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)node->aux;
		for (; eie; eie = eie_next) {
			eie_next = eie->next;
			kfree(eie);
		}
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 delayed_ref_seq, u64 time_seq,
				struct ulist **leafs,
				const u64 *extent_item_pos)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
				time_seq, *leafs, tmp, extent_item_pos);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 delayed_ref_seq, u64 time_seq,
				struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}

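/*
 * Typical use (illustrative sketch only; a real caller handles errors and
 * obtains the sequence numbers as in iterate_extent_inodes() below):
 * collect every root that can reach the extent at @bytenr and walk them.
 *
 *	struct ulist *roots;
 *	struct ulist_node *n;
 *	struct ulist_iterator it;
 *
 *	if (!btrfs_find_all_roots(trans, fs_info, bytenr, delayed_ref_seq,
 *				  time_seq, &roots)) {
 *		ULIST_ITER_INIT(&it);
 *		while ((n = ulist_next(roots, &it)))
 *			pr_debug("reachable from root %llu\n", n->val);
 *		ulist_free(roots);
 *	}
 */
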
static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
				&key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
				found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
				struct btrfs_inode_ref *iref,
				struct extent_buffer *eb_in, u64 parent,
				char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
						(unsigned long)(iref + 1), len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

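/*
 * Example of the backwards fill (hypothetical values): for a file at
 * "dir/subdir/file" and size == 16, dest ends up holding
 * "dir/subdir/file" plus the terminating 0 and the returned pointer
 * equals dest. With size == 8, only the trailing bytes that fit are
 * written, but bytes_left keeps being decremented, so the return value
 * is dest - 8 and the caller can tell that 8 more bytes were needed.
 */
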
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				struct btrfs_extent_inline_ref **out_eiref,
				int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
						&eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

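/*
 * Intended calling convention (sketch; the caller's loop body is
 * hypothetical): start with *ptr == 0 and call until 1 is returned.
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *	int ret;
 *
 *	while (!(ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 *					       &root, &level))) {
 *		... one (root, level) pair delivered per iteration ...
 *	}
 *	... ret == 1: all refs seen, ret < 0: error ...
 */
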
static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
				u64 root, u64 extent_item_objectid,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list seq_elem = {};
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	if (search_commit_root) {
		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
	} else {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
		spin_unlock(&delayed_refs->lock);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   seq_elem.seq, tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
						seq_elem.seq,
						tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#lx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs(
				(struct extent_inode_elem *)ref_node->aux,
				root_node->val, extent_item_objectid,
				iterate, ctx);
		}
		ulist_free(roots);
		roots = NULL;
	}

	free_leaf_list(refs);
	ulist_free(roots);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	btrfs_release_path(path);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path,
			 iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
					&found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
				struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
				inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
				inode_to_path, ipath);
}

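/*
 * End-to-end usage sketch (illustrative; allocation of @path and error
 * handling shortened), using init_ipath()/free_ipath() defined below:
 *
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; ++i)
 *		pr_debug("path %d: %s\n", i, (char *)(unsigned long)
 *			 ipath->fspath->val[i]);
 *	free_ipath(ipath);
 */
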
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

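/*
 * Sizing example (numbers illustrative): for total_bytes == 4096, the
 * usable payload is bytes_left == 4096 - sizeof(*data). A request smaller
 * than the header itself, e.g. total_bytes == 8, still allocates a full
 * header but records the shortfall as bytes_missing == sizeof(*data) - 8
 * with bytes_left == 0, so the caller learns how short the request was.
 */
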
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kfree(ipath->fspath);
	kfree(ipath);
}