/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/sort.h>
#include "delayed-ref.h"
#include "transaction.h"
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
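
/*
 * The refs are queued on the per-transaction delayed_refs root
 * (trans->transaction->delayed_refs) and are applied to the extent
 * allocation tree in bulk when the delayed refs are run, before the
 * transaction commits.
 */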
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
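
/*
 * For a given bytenr the individual refs sort before the head node, so
 * walking rb_prev() from a head visits the refs queued against that
 * extent (see btrfs_delayed_ref_pending below).
 */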
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
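
/*
 * lock the mutex on a delayed ref head.  The caller must already hold
 * delayed_refs->lock; if the trylock fails we take a reference on the
 * head, drop the spinlock while blocking on the mutex, then retake the
 * spinlock and make sure the head is still in the tree before reporting
 * success.
 */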
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
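
/*
 * collect a cluster of delayed ref heads to process, starting at start
 * and wrapping around to the beginning of the rbtree if nothing is found
 * past that point.  Heads that already sit on another cluster list are
 * skipped, and delayed_refs->run_delayed_start records how far the scan
 * has progressed.
 */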
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start, &ref);
		if (ref) {
			struct btrfs_delayed_ref_node *tmp;

			node = rb_prev(&ref->rb_node);
			while (node) {
				tmp = rb_entry(node,
					       struct btrfs_delayed_ref_node,
					       rb_node);
				if (tmp->bytenr < start)
					break;
				ref = tmp;
				node = rb_prev(&ref->rb_node);
			}
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
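	/*
	 * scan forward from here, grabbing delayed ref heads that are not
	 * already queued up on another cluster list
	 */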
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}
/*
 * This checks to see if there are any delayed refs in the
 * btree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *prev_node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref) {
		prev_node = rb_prev(&ref->rb_node);
		if (!prev_node)
			goto out;
		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr == bytenr)
			ret = 1;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	delayed_refs = &trans->transaction->delayed_refs;
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	spin_lock(&delayed_refs->lock);
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref) {
		head = btrfs_delayed_node_to_head(ref);
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&ref->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(ref);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += ref->ref_mod;
		mutex_unlock(&head->mutex);
	}
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;

	spin_unlock(&delayed_refs->lock);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes,
					 int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	}
	full_ref->level = level;

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	full_ref->objectid = owner;
	full_ref->offset = offset;

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 0);
	BUG_ON(ret);

	ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, level, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 1);
	BUG_ON(ret);

	ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, owner, offset, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
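
/*
 * record a pending extent_op (a key and/or flags update) for an extent.
 * Only a head node is inserted here; the op rides on the head ref and is
 * applied when the delayed refs for this bytenr are run.
 */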
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);
	BUG_ON(ret);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}
/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 orig_parent,
			  u64 parent, u64 orig_ref_root, u64 ref_root,
			  u64 orig_ref_generation, u64 ref_generation,
			  u64 owner_objectid, int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref *old_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
	if (!old_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via an add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;
	if (orig_parent == 0)
		orig_parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		kfree(old_ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0,
				      BTRFS_UPDATE_DELAYED_HEAD, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
				      orig_parent, orig_ref_root,
				      orig_ref_generation, owner_objectid,
				      BTRFS_DROP_DELAYED_REF, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}