/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
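/*
 * In short (a summary, not from the original file): every extent
 * modification queues a btrfs_delayed_ref_node under a
 * btrfs_delayed_ref_head keyed by bytenr, and the queued heads are
 * drained by btrfs_run_delayed_refs() before the transaction commits.
 */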
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
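/*
 * Summary of the ordering above: refs sort by bytenr, then head-ness,
 * then type, then (optionally) seq, then the type-specific fields via
 * comp_tree_refs() or comp_data_refs().  Callers pass compare_seq == 0
 * so that refs differing only in seq compare equal, which is what
 * merge_ref() relies on to find merge candidates.
 */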
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
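/*
 * Note that tree_insert() deliberately does not replace on collision:
 * returning the existing node lets the caller fold the new modification
 * into it (see update_existing_ref()) and free the unused allocation.
 */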
/* insert a new ref into the head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
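/*
 * Heads are keyed by bytenr alone, so href_root holds at most one head
 * per extent; the individual refs for that extent all hang off
 * head->ref_root instead.
 */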
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
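/*
 * The return_bigger case wraps around to rb_first() when the search
 * runs off the end of the tree.  That wrap-around is what lets
 * btrfs_select_ref_head() resume scanning from run_delayed_start and
 * still visit every head.
 */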
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
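/*
 * The trylock/retry dance above exists because head->mutex may not be
 * acquired while delayed_refs->lock is held.  We pin the head with a
 * reference, drop the spinlock, sleep on the mutex, then retake the
 * spinlock; if the head left the tree in the meantime the caller gets
 * -EAGAIN and must look the head up again.
 */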
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		rb_erase(&ref->rb_node, &head->ref_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			/*
			 * Need to adjust our ref for the add part since the
			 * existing ref may be a drop
			 */
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}
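/*
 * Net effect of merge_ref(): two refs with the same action accumulate
 * their ref_mod into one node, an add and a drop cancel against each
 * other, and a node whose ref_mod reaches zero is removed outright, so
 * the extent allocation tree never sees the cancelled operations.
 */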
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);
	/*
	 * We don't have too many refs to merge in the case of delayed data
	 * refs.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_first(&head->ref_root);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			node = rb_first(&head->ref_root);
		else
			node = rb_next(&ref->rb_node);
	}
}
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}
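/*
 * run_delayed_start makes selection round-robin across the keyspace:
 * each pick advances the cursor past the chosen extent, and the loop
 * flag permits exactly one wrap back to bytenr 0 before giving up.
 */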
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	existing->ref_mod += update->ref_mod;
	spin_unlock(&existing_ref->lock);
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_root = RB_ROOT;
	head_ref->processing = 0;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(&existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}
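/*
 * count_mod above keeps head->node.ref_mod equal to the net effect of
 * all queued operations: +1 per add, -1 per drop, 0 for a pure head
 * update.  For example, ADD, ADD, DROP on one extent leaves the head
 * with ref_mod == 1.
 */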
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
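/*
 * Illustrative call (a sketch, not taken from this file): a caller
 * bumping the backref for a COWed tree block might do roughly
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
 *					 buf->len, parent,
 *					 root->root_key.objectid, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL, 1);
 *
 * instead of updating the extent allocation tree inline.
 */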
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
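/*
 * On any allocation failure above we fall through to
 * btrfs_delayed_ref_exit(), which is safe to call with a partially
 * initialized set of caches because it NULL-checks each cachep first.
 */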