/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
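/*
 * Note the argument order: both comparators take (ref2, ref1) so that
 * comp_entry() below can pass (existing, new) straight through.  A
 * negative return means the new ref sorts before the existing one.
 */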
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (ref1->seq < ref2->seq)
		return -1;
	if (ref1->seq > ref2->seq)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
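/*
 * The resulting order for a given bytenr: all delayed refs first, sorted
 * by type, then seq, then per-type content, with the ref head sorting
 * after every ref for that bytenr.  find_ref_head() below relies on the
 * head being the rightmost entry for its bytenr.
 */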
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
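/*
 * tree_insert() is called with delayed_refs->lock held everywhere in this
 * file.  When it returns an existing node, the caller merges the new ref
 * into it via update_existing_ref() or update_existing_head_ref() and
 * frees the duplicate instead of inserting it.
 */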
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last,
				  int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}
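/*
 * When return_bigger is set and the search ran past the requested bytenr
 * (cmp > 0 on the last visited entry), the lookup above steps to the next
 * entry in the tree, wrapping around to rb_first() at the end, and then
 * restarts with return_bigger cleared so the head for that bytenr is
 * found by an exact search.
 */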
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
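/*
 * The trylock/relock dance above: if the head mutex is contended we take
 * a temporary reference so the head can't be freed, drop the spinlock so
 * we may sleep on the mutex, and re-check in_tree once we have it.
 * -EAGAIN means the head was run and removed while we slept, so the
 * caller should look it up again.
 */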
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %llu, lowest is "
				 "%llu (%p)\n", seq, elem->seq, delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
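/*
 * Returns 1 when the oldest registered tree mod log user (the first entry
 * on tree_mod_seq_list) holds a seq no bigger than this ref's seq, i.e.
 * the delayed ref must be held back so that user still sees a consistent
 * history while it walks backrefs.
 */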
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref) {
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;
				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}
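/*
 * A cluster is at most 32 ref heads whose bytenrs are adjacent in the
 * rbtree, so the extent tree updates they generate are likely to land in
 * the same leaves on disk.  Returns 0 when at least one head was added to
 * 'cluster', 1 when no unused heads are left anywhere in the tree.
 */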
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
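/*
 * On the WARN_ONs above: tree block refs are always inserted with
 * ref_mod == 1 (see add_delayed_tree_ref()), so a merge that leaves
 * ref_mod != 0, or that bumps it past one, is only expected for data
 * refs.
 */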
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}
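/*
 * extent_op merging above: a pending key update is copied over and
 * flags_to_set is accumulated with OR, so the one surviving extent_op
 * carries the union of both requests; the newer, now redundant op is
 * freed.
 */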
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes,
					  int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
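/*
 * count_mod summary for the head: BTRFS_ADD_DELAYED_REF and
 * BTRFS_ADD_DELAYED_EXTENT contribute +1, BTRFS_DROP_DELAYED_REF
 * contributes -1 and BTRFS_UPDATE_DELAYED_HEAD contributes 0, so after
 * merging, head->node.ref_mod is the net reference count change the
 * extent sees in this transaction.
 */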
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes, u64 parent,
					  u64 ref_root, int level, int action,
					  int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes, u64 parent,
					  u64 ref_root, u64 owner, u64 offset,
					  int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
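/*
 * add_delayed_tree_ref() and add_delayed_data_ref() differ only in the
 * type-specific payload, (root, level) vs (root, objectid, offset), and
 * in the ref type chosen when a parent block is given: a shared backref
 * with parent, a keyed backref without.
 */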
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&fs_info->tree_mod_seq_wait))
		wake_up(&fs_info->tree_mod_seq_wait);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&fs_info->tree_mod_seq_wait))
		wake_up(&fs_info->tree_mod_seq_wait);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	if (waitqueue_active(&fs_info->tree_mod_seq_wait))
		wake_up(&fs_info->tree_mod_seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
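/*
 * A minimal usage sketch for the add_delayed_* entry points.  The values
 * here are hypothetical; real callers live in extent-tree.c and ctree.c:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, eb->start, eb->len,
 *					 0, root->root_key.objectid, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL, 0);
 *	if (ret)
 *		return ret;
 *
 * Each call queues a head (created or merged) plus one ref under a single
 * hold of delayed_refs->lock; the refs are then run in bulk at transaction
 * commit (or when the backlog grows too large).
 */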
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}