fs/btrfs/delayed-ref.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
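/*
 * the usual flow (a sketch, not a strict contract): the tree
 * modification code queues refs through btrfs_add_delayed_tree_ref()
 * or btrfs_add_delayed_data_ref() below, and the accumulated refs are
 * run by btrfs_run_delayed_refs() in extent-tree.c before the
 * transaction commits.
 */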
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
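/*
 * note that comp_entry() sorts the head node after all of the ref
 * nodes that share its bytenr.  That is what lets find_ref_head()
 * descend to a head, and lets callers walk rb_prev() from a head to
 * visit that extent's pending refs.
 */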
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was
 * properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If 'last' is non-NULL, it is set to the last entry examined during
 * the search, even when no head is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				    u64 bytenr,
				    struct btrfs_delayed_ref_node **last)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;
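	/*
	 * someone else holds the mutex.  Take a reference on the head
	 * so it can't go away, drop the spinlock so we can sleep on
	 * the mutex, and then revalidate that the head is still in
	 * the tree once we get it.
	 */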
	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
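/*
 * a caller that sees -EAGAIN should drop this head and move on to
 * another one; see run_clustered_refs() in extent-tree.c for the
 * expected retry pattern.
 */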
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start, &ref);
		if (ref) {
			struct btrfs_delayed_ref_node *tmp;
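			/*
			 * find_ref_head() left 'ref' at the last node
			 * it examined.  Walk backwards while the
			 * previous entries still have bytenr >= start
			 * so the forward scan below begins at the
			 * first candidate.
			 */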
			node = rb_prev(&ref->rb_node);
			while (node) {
				tmp = rb_entry(node,
					       struct btrfs_delayed_ref_node,
					       rb_node);
				if (tmp->bytenr < start)
					break;
				ref = tmp;
				node = rb_prev(&ref->rb_node);
			}
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
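	/*
	 * collect up to 32 ref heads that nobody else has started
	 * processing (their cluster list is still empty) into the
	 * caller's cluster list.
	 */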
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}
/*
 * This checks to see if there are any delayed refs in the
 * btree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *prev_node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref) {
		prev_node = rb_prev(&ref->rb_node);
		if (!prev_node)
			goto out;
		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr == bytenr)
			ret = 1;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
/*
 * helper function to lookup reference count and flags of an extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	delayed_refs = &trans->transaction->delayed_refs;
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;	/* the delayed ref lock is not held yet */

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	spin_lock(&delayed_refs->lock);
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref) {
		head = btrfs_delayed_node_to_head(ref);
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&ref->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);
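			/*
			 * whoever holds the mutex is processing this
			 * head right now.  Locking and immediately
			 * unlocking it just waits for them to finish,
			 * then we redo the search from scratch.
			 */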
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(ref);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += ref->ref_mod;
		mutex_unlock(&head->mutex);
	}
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
	spin_unlock(&delayed_refs->lock);
out_free:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}
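	/*
	 * merge the two extent_ops: keep any pending key update and OR
	 * together the flag updates so neither modification is lost.
	 */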
	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes,
					 int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	}
	full_ref->level = level;

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	if (parent) {
		full_ref->parent = parent;
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	} else {
		full_ref->root = ref_root;
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	full_ref->objectid = owner;
	full_ref->offset = offset;

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 0);
	BUG_ON(ret);

	ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, level, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
				   action, 1);
	BUG_ON(ret);

	ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
				   parent, ref_root, owner, offset, action);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);
	BUG_ON(ret);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}
/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a
 * single shot, taking the lock only once, and only searching for the head
 * node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
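/*
 * kept for reference only: this predates the split into separate tree
 * and data refs and still calls the old __btrfs_add_delayed_ref()
 * helper, so it is compiled out.
 */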
#if 0
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, u64 orig_parent,
			     u64 parent, u64 orig_ref_root, u64 ref_root,
			     u64 orig_ref_generation, u64 ref_generation,
			     u64 owner_objectid, int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref *old_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
	if (!old_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via an add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;
	if (orig_parent == 0)
		orig_parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		kfree(old_ref);
		return -ENOMEM;
	}
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr,
				      num_bytes, (u64)-1, 0, 0, 0,
				      BTRFS_UPDATE_DELAYED_HEAD, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
				      orig_parent, orig_ref_root,
				      orig_ref_generation, owner_objectid,
				      BTRFS_DROP_DELAYED_REF, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
#endif