/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
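
/*
 * Rough usage sketch (illustrative only, not part of this file): instead
 * of updating the extent allocation tree inline, callers queue a delayed
 * ref, e.g.
 *
 *	ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes, parent,
 *					 root->root_key.objectid, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL);
 *
 * and the queued modifications are applied in batches later when the
 * transaction code runs the delayed refs (see btrfs_run_delayed_refs()
 * in extent-tree.c).
 */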
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
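
/*
 * Illustrative locking sketch (an assumed typical caller, not verbatim
 * from this tree): the delayed_refs spinlock must already be held, and
 * on success the caller owns head->mutex and must drop it when done:
 *
 *	spin_lock(&delayed_refs->lock);
 *	if (!btrfs_delayed_ref_lock(trans, head)) {
 *		... process the head's refs ...
 *		mutex_unlock(&head->mutex);
 *	}
 *	spin_unlock(&delayed_refs->lock);
 */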
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start, &ref);
		if (ref) {
			struct btrfs_delayed_ref_node *tmp;

			node = rb_prev(&ref->rb_node);
			while (node) {
				tmp = rb_entry(node,
					       struct btrfs_delayed_ref_node,
					       rb_node);
				if (tmp->bytenr < start)
					break;
				ref = tmp;
				node = rb_prev(&ref->rb_node);
			}
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}
/*
 * This checks to see if there are any delayed refs in the
 * btree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *prev_node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref) {
		prev_node = rb_prev(&ref->rb_node);
		if (!prev_node)
			goto out;
		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr == bytenr)
			ret = 1;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
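
/*
 * Example of the intended use described above (hypothetical caller, for
 * illustration only): before deleting an extent item, check whether any
 * delayed ref still touches it.
 *
 *	if (btrfs_delayed_ref_pending(trans, bytenr))
 *		return 0;	(keep the BTRFS_EXTENT_ITEM for now)
 *	... safe to remove the item from the extent allocation tree ...
 */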
/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	delayed_refs = &trans->transaction->delayed_refs;
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	spin_lock(&delayed_refs->lock);
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref) {
		head = btrfs_delayed_node_to_head(ref);
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&ref->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(ref);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += ref->ref_mod;
		mutex_unlock(&head->mutex);
	}
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;

	spin_unlock(&delayed_refs->lock);
out:
	btrfs_free_path(path);
	return ret;
}
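
/*
 * Minimal usage sketch for the helper above (an assumed caller, for
 * illustration): read the reference count and flags an extent would have
 * once every queued delayed ref has been applied:
 *
 *	u64 refs, flags;
 *	ret = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
 *				       &refs, &flags);
 *	if (!ret && refs == 1)
 *		... the extent is only referenced once ...
 */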
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes,
					 int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	}
	full_ref->level = level;

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	full_ref->objectid = owner;
	full_ref->offset = offset;

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 0);
	BUG_ON(ret);

	ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, level, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 1);
	BUG_ON(ret);

	ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, owner, offset, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);
	BUG_ON(ret);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if any were found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}
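
/*
 * Sketch of the locking contract noted above (illustrative, not a real
 * caller from this tree): the delayed ref spinlock must be held across
 * the lookup and for as long as the returned head is used without taking
 * a reference of its own:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	if (head)
 *		... use head, or grab head->mutex / a ref before dropping ...
 *	spin_unlock(&delayed_refs->lock);
 */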
/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, u64 orig_parent,
			     u64 parent, u64 orig_ref_root, u64 ref_root,
			     u64 orig_ref_generation, u64 ref_generation,
			     u64 owner_objectid, int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref *old_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
	if (!old_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via a add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;
	if (orig_parent == 0)
		orig_parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		kfree(old_ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0,
				      BTRFS_UPDATE_DELAYED_HEAD, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
				      orig_parent, orig_ref_root,
				      orig_ref_generation, owner_objectid,
				      BTRFS_DROP_DELAYED_REF, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}