/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
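
/*
 * The flow through this file, in short: callers queue a modification
 * with btrfs_add_delayed_tree_ref(), btrfs_add_delayed_data_ref() or
 * btrfs_add_delayed_extent_op(), each of which inserts a head node for
 * the extent plus (for the first two) a node describing the individual
 * backref.  btrfs_find_ref_cluster() later hands batches of head nodes
 * to the delayed ref processing code (btrfs_run_delayed_refs() in
 * extent-tree.c), which applies the accumulated mods to the extent
 * allocation tree.
 */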
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                      struct btrfs_delayed_ref_node *ref1)
{
        if (ref1->bytenr < ref2->bytenr)
                return -1;
        if (ref1->bytenr > ref2->bytenr)
                return 1;
        if (ref1->is_head && ref2->is_head)
                return 0;
        if (ref2->is_head)
                return -1;
        if (ref1->is_head)
                return 1;
        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        /* merging of sequenced refs is not allowed */
        if (ref1->seq < ref2->seq)
                return -1;
        if (ref1->seq > ref2->seq)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
                                      btrfs_delayed_node_to_tree_ref(ref1));
        } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
                   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
                return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
                                      btrfs_delayed_node_to_data_ref(ref1));
        }
        BUG_ON(1);
        return 0;
}
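
/*
 * Note the resulting sort order: entries are keyed by (bytenr, is_head,
 * type, seq, per-type fields), and the head node for an extent sorts
 * after all of the individual ref nodes for the same bytenr.  Because
 * seq participates in the comparison, two sequenced refs never compare
 * equal and thus are never merged by tree_insert().
 */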
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                                                  struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        struct btrfs_delayed_ref_node *ins;
        int cmp;

        ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 rb_node);

                cmp = comp_entry(entry, ins);
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}
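
/*
 * A non-NULL return from tree_insert() means the new node collided with
 * an existing entry; the callers below merge the two with
 * update_existing_ref() or update_existing_head_ref() and then free the
 * node that was never linked into the tree.
 */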
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                                    u64 bytenr,
                                                    struct btrfs_delayed_ref_node **last,
                                                    int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_node *entry;
        int cmp = 0;

again:
        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);
                if (last)
                        *last = entry;

                if (bytenr < entry->bytenr)
                        cmp = -1;
                else if (bytenr > entry->bytenr)
                        cmp = 1;
                else if (!btrfs_delayed_ref_is_head(entry))
                        cmp = 1;
                else
                        cmp = 0;

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (cmp > 0) {
                        n = rb_next(&entry->rb_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_node,
                                         rb_node);
                        bytenr = entry->bytenr;
                        return_bigger = 0;
                        goto again;
                }
                return entry;
        }
        return NULL;
}
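
/*
 * btrfs_delayed_ref_lock() takes a head's mutex without losing the head:
 * if the trylock fails, it pins the head with an extra reference, drops
 * the delayed_refs spinlock so it can sleep on the mutex, and returns
 * -EAGAIN if the head was run and unlinked from the tree in the
 * meantime.
 */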
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}
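
/*
 * btrfs_check_delayed_seq() returns 1 (hold the ref back) when the
 * oldest entry on fs_info->tree_mod_seq_list has a seq number at or
 * below the given one, meaning some tree mod log user still needs to
 * see the backrefs as they were before this ref is run.
 */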
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %llu, lowest is "
                                 "%llu (%p)\n", seq, elem->seq, delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}
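
/*
 * btrfs_find_ref_cluster() collects up to 32 heads that are not already
 * on some cluster, starting just past 'start', onto the caller's
 * cluster list.  It returns 0 once at least one head was found and 1 if
 * the whole tree was scanned without finding any.
 */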
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
{
        int count = 0;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_head *head;

        delayed_refs = &trans->transaction->delayed_refs;
        if (start == 0) {
                node = rb_first(&delayed_refs->root);
        } else {
                ref = NULL;
                find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
                if (ref) {
                        node = &ref->rb_node;
                } else
                        node = rb_first(&delayed_refs->root);
        }
again:
        while (node && count < 32) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                if (btrfs_delayed_ref_is_head(ref)) {
                        head = btrfs_delayed_node_to_head(ref);
                        if (list_empty(&head->cluster)) {
                                list_add_tail(&head->cluster, cluster);
                                delayed_refs->run_delayed_start =
                                        head->node.bytenr;
                                count++;

                                WARN_ON(delayed_refs->num_heads_ready == 0);
                                delayed_refs->num_heads_ready--;
                        } else if (count) {
                                /* the goal of the clustering is to find extents
                                 * that are likely to end up in the same extent
                                 * leaf on disk.  So, we don't want them spread
                                 * all over the tree.  Stop now if we've hit
                                 * a head that was already in use
                                 */
                                break;
                        }
                }
                node = rb_next(node);
        }
        if (count) {
                return 0;
        } else if (start) {
                /*
                 * we've gone to the end of the rbtree without finding any
                 * clusters.  start from the beginning and try again
                 */
                start = 0;
                node = rb_first(&delayed_refs->root);
                goto again;
        }
        return 1;
}
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
                    struct btrfs_delayed_ref_root *delayed_refs,
                    struct btrfs_delayed_ref_node *existing,
                    struct btrfs_delayed_ref_node *update)
{
        if (update->action != existing->action) {
                /*
                 * this is effectively undoing either an add or a
                 * drop.  We decrement the ref_mod, and if it goes
                 * down to zero we just delete the entry without
                 * ever changing the extent allocation tree.
                 */
                existing->ref_mod--;
                if (existing->ref_mod == 0) {
                        rb_erase(&existing->rb_node,
                                 &delayed_refs->root);
                        existing->in_tree = 0;
                        btrfs_put_delayed_ref(existing);
                        delayed_refs->num_entries--;
                        if (trans->delayed_ref_updates)
                                trans->delayed_ref_updates--;
                } else {
                        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        } else {
                WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                /*
                 * the action on the existing ref matches
                 * the action on the ref we're trying to add.
                 * Bump the ref_mod by one so the backref that
                 * is eventually added/removed has the correct
                 * reference count
                 */
                existing->ref_mod += update->ref_mod;
        }
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;
        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        kfree(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation
         */
        existing->ref_mod += update->ref_mod;
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                                          struct btrfs_trans_handle *trans,
                                          struct btrfs_delayed_ref_node *ref,
                                          u64 bytenr, u64 num_bytes,
                                          int action, int is_data)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;

        INIT_LIST_HEAD(&head_ref->cluster);
        mutex_init(&head_ref->mutex);

        trace_btrfs_delayed_ref_head(ref, head_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_head_ref(existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(head_ref);
        } else {
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}
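
/*
 * Head accounting example: queueing BTRFS_ADD_DELAYED_REF twice and
 * BTRFS_DROP_DELAYED_REF once for the same bytenr leaves the head with
 * ref_mod == 1 (+1 + 1 - 1), a single net reference add, while
 * BTRFS_UPDATE_DELAYED_HEAD contributes nothing to the sum.
 */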
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                                          struct btrfs_trans_handle *trans,
                                          struct btrfs_delayed_ref_node *ref,
                                          u64 bytenr, u64 num_bytes, u64 parent,
                                          u64 ref_root, int level, int action,
                                          int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_btrfs_delayed_tree_ref(ref, full_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                                          struct btrfs_trans_handle *trans,
                                          struct btrfs_delayed_ref_node *ref,
                                          u64 bytenr, u64 num_bytes, u64 parent,
                                          u64 ref_root, u64 owner, u64 offset,
                                          int action, int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_btrfs_delayed_data_ref(ref, full_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kfree(full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, action, 0);

        add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action,
                             for_cow);
        if (!need_ref_seq(for_cow, ref_root) &&
            waitqueue_active(&fs_info->tree_mod_seq_wait))
                wake_up(&fs_info->tree_mod_seq_wait);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmalloc(sizeof(*ref), GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref) {
                kfree(ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, action, 1);

        add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action, for_cow);
        if (!need_ref_seq(for_cow, ref_root) &&
            waitqueue_active(&fs_info->tree_mod_seq_wait))
                wake_up(&fs_info->tree_mod_seq_wait);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                             num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        if (waitqueue_active(&fs_info->tree_mod_seq_wait))
                wake_up(&fs_info->tree_mod_seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if any were found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
        if (ref)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
}