fs/btrfs/ctree.c
1 /*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
44 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
46 struct btrfs_path *btrfs_alloc_path(void)
48 struct btrfs_path *path;
49 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 return path;
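/*
 * A minimal sketch of the typical lifecycle of a path (illustrative only):
 * allocate it, use it for a search or modification, then free it.
 * btrfs_free_path() below also drops any locks and extent buffer
 * references still held, via btrfs_release_path().
 *
 *	struct btrfs_path *path;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	...search / modify the tree using the path...
 *	btrfs_free_path(path);
 */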
54 * set all locked nodes in the path to blocking locks. This should
55 * be done before scheduling
57 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
59 int i;
60 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
61 if (!p->nodes[i] || !p->locks[i])
62 continue;
63 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
64 if (p->locks[i] == BTRFS_READ_LOCK)
65 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
66 else if (p->locks[i] == BTRFS_WRITE_LOCK)
67 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
72 * reset all the locked nodes in the path to spinning locks.
74 * held is used to keep lockdep happy: when lockdep is enabled
75 * we set held to a blocking lock before we go around and
76 * retake all the spinlocks in the path. You can safely use NULL
77 * for held.
79 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
80 struct extent_buffer *held, int held_rw)
82 int i;
84 #ifdef CONFIG_DEBUG_LOCK_ALLOC
85 /* lockdep really cares that we take all of these spinlocks
86 * in the right order. If any of the locks in the path are not
87 * currently blocking, it is going to complain. So, make really
88 * really sure by forcing the path to blocking before we clear
89 * the path blocking.
91 if (held) {
92 btrfs_set_lock_blocking_rw(held, held_rw);
93 if (held_rw == BTRFS_WRITE_LOCK)
94 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
95 else if (held_rw == BTRFS_READ_LOCK)
96 held_rw = BTRFS_READ_LOCK_BLOCKING;
98 btrfs_set_path_blocking(p);
99 #endif
101 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
102 if (p->nodes[i] && p->locks[i]) {
103 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
104 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
105 p->locks[i] = BTRFS_WRITE_LOCK;
106 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
107 p->locks[i] = BTRFS_READ_LOCK;
111 #ifdef CONFIG_DEBUG_LOCK_ALLOC
112 if (held)
113 btrfs_clear_lock_blocking_rw(held, held_rw);
114 #endif
117 /* this also releases the path */
118 void btrfs_free_path(struct btrfs_path *p)
120 if (!p)
121 return;
122 btrfs_release_path(p);
123 kmem_cache_free(btrfs_path_cachep, p);
127 * path release drops references on the extent buffers in the path
128 * and it drops any locks held by this path
130 * It is safe to call this on paths that have no locks or extent buffers held.
132 noinline void btrfs_release_path(struct btrfs_path *p)
134 int i;
136 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
137 p->slots[i] = 0;
138 if (!p->nodes[i])
139 continue;
140 if (p->locks[i]) {
141 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
142 p->locks[i] = 0;
144 free_extent_buffer(p->nodes[i]);
145 p->nodes[i] = NULL;
150 * safely gets a reference on the root node of a tree. A lock
151 * is not taken, so a concurrent writer may put a different node
152 * at the root of the tree. See btrfs_lock_root_node for the
153 * looping required.
155 * The extent buffer returned by this has a reference taken, so
156 * it won't disappear. It may stop being the root of the tree
157 * at any time because there are no locks held.
159 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
161 struct extent_buffer *eb;
163 while (1) {
164 rcu_read_lock();
165 eb = rcu_dereference(root->node);
168 * RCU really hurts here; we could free up the root node because
169 * it was COWed but we may not get the new root node yet, so do
170 * the inc_not_zero dance and if it doesn't work then
171 * synchronize_rcu and try again.
173 if (atomic_inc_not_zero(&eb->refs)) {
174 rcu_read_unlock();
175 break;
177 rcu_read_unlock();
178 synchronize_rcu();
180 return eb;
183 /* loop around taking references on and locking the root node of the
184 * tree until you end up with a lock on the root. A locked buffer
185 * is returned, with a reference held.
187 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
189 struct extent_buffer *eb;
191 while (1) {
192 eb = btrfs_root_node(root);
193 btrfs_tree_lock(eb);
194 if (eb == root->node)
195 break;
196 btrfs_tree_unlock(eb);
197 free_extent_buffer(eb);
199 return eb;
202 /* loop around taking references on and locking the root node of the
203 * tree until you end up with a lock on the root. A locked buffer
204 * is returned, with a reference held.
206 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
208 struct extent_buffer *eb;
210 while (1) {
211 eb = btrfs_root_node(root);
212 btrfs_tree_read_lock(eb);
213 if (eb == root->node)
214 break;
215 btrfs_tree_read_unlock(eb);
216 free_extent_buffer(eb);
218 return eb;
221 /* cowonly roots (everything not a reference counted cow subvolume) just get
222 * put onto a simple dirty list. transaction.c walks this list to make sure
223 * they get properly updated on disk.
225 static void add_root_to_dirty_list(struct btrfs_root *root)
227 spin_lock(&root->fs_info->trans_lock);
228 if (root->track_dirty && list_empty(&root->dirty_list)) {
229 list_add(&root->dirty_list,
230 &root->fs_info->dirty_cowonly_roots);
232 spin_unlock(&root->fs_info->trans_lock);
236 * used by snapshot creation to make a copy of a root for a tree with
237 * a given objectid. The buffer with the new root node is returned in
238 * cow_ret, and this func returns zero on success or a negative error code.
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 struct btrfs_root *root,
242 struct extent_buffer *buf,
243 struct extent_buffer **cow_ret, u64 new_root_objectid)
245 struct extent_buffer *cow;
246 int ret = 0;
247 int level;
248 struct btrfs_disk_key disk_key;
250 WARN_ON(root->ref_cows && trans->transid !=
251 root->fs_info->running_transaction->transid);
252 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
254 level = btrfs_header_level(buf);
255 if (level == 0)
256 btrfs_item_key(buf, &disk_key, 0);
257 else
258 btrfs_node_key(buf, &disk_key, 0);
260 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
261 new_root_objectid, &disk_key, level,
262 buf->start, 0);
263 if (IS_ERR(cow))
264 return PTR_ERR(cow);
266 copy_extent_buffer(cow, buf, 0, 0, cow->len);
267 btrfs_set_header_bytenr(cow, cow->start);
268 btrfs_set_header_generation(cow, trans->transid);
269 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
270 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
271 BTRFS_HEADER_FLAG_RELOC);
272 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
273 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
274 else
275 btrfs_set_header_owner(cow, new_root_objectid);
277 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
278 BTRFS_FSID_SIZE);
280 WARN_ON(btrfs_header_generation(buf) > trans->transid);
281 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
282 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
283 else
284 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
286 if (ret)
287 return ret;
289 btrfs_mark_buffer_dirty(cow);
290 *cow_ret = cow;
291 return 0;
294 enum mod_log_op {
295 MOD_LOG_KEY_REPLACE,
296 MOD_LOG_KEY_ADD,
297 MOD_LOG_KEY_REMOVE,
298 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
299 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
300 MOD_LOG_MOVE_KEYS,
301 MOD_LOG_ROOT_REPLACE,
304 struct tree_mod_move {
305 int dst_slot;
306 int nr_items;
309 struct tree_mod_root {
310 u64 logical;
311 u8 level;
314 struct tree_mod_elem {
315 struct rb_node node;
316 u64 index; /* shifted logical */
317 u64 seq;
318 enum mod_log_op op;
320 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 int slot;
323 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
324 u64 generation;
326 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
327 struct btrfs_disk_key key;
328 u64 blockptr;
330 /* this is used for op == MOD_LOG_MOVE_KEYS */
331 struct tree_mod_move move;
333 /* this is used for op == MOD_LOG_ROOT_REPLACE */
334 struct tree_mod_root old_root;
337 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
339 read_lock(&fs_info->tree_mod_log_lock);
342 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
344 read_unlock(&fs_info->tree_mod_log_lock);
347 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
349 write_lock(&fs_info->tree_mod_log_lock);
352 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
354 write_unlock(&fs_info->tree_mod_log_lock);
358 * Increment the upper half of tree_mod_seq, set lower half zero.
360 * Must be called with fs_info->tree_mod_seq_lock held.
362 static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
364 u64 seq = atomic64_read(&fs_info->tree_mod_seq);
365 seq &= 0xffffffff00000000ull;
366 seq += 1ull << 32;
367 atomic64_set(&fs_info->tree_mod_seq, seq);
368 return seq;
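/*
 * Worked example of the major/minor packing used here: tree_mod_seq keeps
 * the major number in the upper 32 bits and the minor number in the lower
 * 32 bits. Starting from e.g. 0x00000002000001a4, the major increment
 * above produces 0x0000000300000000, and a subsequent minor increment
 * (below) produces 0x0000000300000001.
 */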
372 * Increment the lower half of tree_mod_seq.
374 * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
375 * are generated should not technically require a spin lock here. (Rationale:
376 * incrementing the minor while the major seq number is being incremented,
377 * i.e. between its atomic64_read and atomic64_set calls, doesn't duplicate
378 * sequence numbers; it just returns a unique sequence number as usual.) We
379 * have decided to leave that requirement in here and rethink it once we
380 * notice it really imposes a problem on some workload.
382 static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
384 return atomic64_inc_return(&fs_info->tree_mod_seq);
388 * return the last minor in the previous major tree_mod_seq number
390 u64 btrfs_tree_mod_seq_prev(u64 seq)
392 return (seq & 0xffffffff00000000ull) - 1ull;
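/*
 * Example: btrfs_tree_mod_seq_prev(0x0000000300000005) returns
 * 0x00000002ffffffff, i.e. the last minor of the previous major number.
 */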
396 * This adds a new blocker to the tree mod log's blocker list if the @elem
397 * passed does not already have a sequence number set. So when a caller expects
398 * to record tree modifications, it should ensure to set elem->seq to zero
399 * before calling btrfs_get_tree_mod_seq.
400 * Returns a fresh, unused tree log modification sequence number, even if no new
401 * blocker was added.
403 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
404 struct seq_list *elem)
406 u64 seq;
408 tree_mod_log_write_lock(fs_info);
409 spin_lock(&fs_info->tree_mod_seq_lock);
410 if (!elem->seq) {
411 elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
412 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
414 seq = btrfs_inc_tree_mod_seq_minor(fs_info);
415 spin_unlock(&fs_info->tree_mod_seq_lock);
416 tree_mod_log_write_unlock(fs_info);
418 return seq;
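/*
 * Rough usage sketch (assuming a caller-owned struct seq_list with
 * elem.seq left at zero, as required by the comment above):
 *
 *	struct seq_list elem = {};
 *	u64 seq;
 *
 *	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	...use seq to look at old versions of the tree...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * While a blocker is registered, tree mod log entries newer than the
 * lowest registered sequence number are kept; btrfs_put_tree_mod_seq()
 * below prunes older entries once no blocker with a lower sequence
 * number remains.
 */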
421 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
422 struct seq_list *elem)
424 struct rb_root *tm_root;
425 struct rb_node *node;
426 struct rb_node *next;
427 struct seq_list *cur_elem;
428 struct tree_mod_elem *tm;
429 u64 min_seq = (u64)-1;
430 u64 seq_putting = elem->seq;
432 if (!seq_putting)
433 return;
435 spin_lock(&fs_info->tree_mod_seq_lock);
436 list_del(&elem->list);
437 elem->seq = 0;
439 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
440 if (cur_elem->seq < min_seq) {
441 if (seq_putting > cur_elem->seq) {
443 * blocker with lower sequence number exists, we
444 * cannot remove anything from the log
446 spin_unlock(&fs_info->tree_mod_seq_lock);
447 return;
449 min_seq = cur_elem->seq;
452 spin_unlock(&fs_info->tree_mod_seq_lock);
455 * anything that's lower than the lowest existing (read: blocked)
456 * sequence number can be removed from the tree.
458 tree_mod_log_write_lock(fs_info);
459 tm_root = &fs_info->tree_mod_log;
460 for (node = rb_first(tm_root); node; node = next) {
461 next = rb_next(node);
462 tm = container_of(node, struct tree_mod_elem, node);
463 if (tm->seq > min_seq)
464 continue;
465 rb_erase(node, tm_root);
466 kfree(tm);
468 tree_mod_log_write_unlock(fs_info);
472 * key order of the log:
473 * index -> sequence
475 * the index is the shifted logical of the *new* root node for root replace
476 * operations, or the shifted logical of the affected block for all other
477 * operations.
479 static noinline int
480 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
482 struct rb_root *tm_root;
483 struct rb_node **new;
484 struct rb_node *parent = NULL;
485 struct tree_mod_elem *cur;
486 int ret = 0;
488 BUG_ON(!tm);
490 tree_mod_log_write_lock(fs_info);
491 if (list_empty(&fs_info->tree_mod_seq_list)) {
492 tree_mod_log_write_unlock(fs_info);
494 * Ok we no longer care about logging modifications, free up tm
495 * and return 0. Any callers shouldn't be using tm after
496 * calling tree_mod_log_insert, but if they do we can just
497 * change this to return a special error code to let the callers
498 * do their own thing.
500 kfree(tm);
501 return 0;
504 spin_lock(&fs_info->tree_mod_seq_lock);
505 tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
506 spin_unlock(&fs_info->tree_mod_seq_lock);
508 tm_root = &fs_info->tree_mod_log;
509 new = &tm_root->rb_node;
510 while (*new) {
511 cur = container_of(*new, struct tree_mod_elem, node);
512 parent = *new;
513 if (cur->index < tm->index)
514 new = &((*new)->rb_left);
515 else if (cur->index > tm->index)
516 new = &((*new)->rb_right);
517 else if (cur->seq < tm->seq)
518 new = &((*new)->rb_left);
519 else if (cur->seq > tm->seq)
520 new = &((*new)->rb_right);
521 else {
522 ret = -EEXIST;
523 kfree(tm);
524 goto out;
528 rb_link_node(&tm->node, parent, new);
529 rb_insert_color(&tm->node, tm_root);
530 out:
531 tree_mod_log_write_unlock(fs_info);
532 return ret;
536 * Determines if logging can be omitted. Returns 1 if it can, and 0 if the
537 * modification must be recorded. No lock is taken here; the actual
538 * insertion path (__tree_mod_log_insert) takes tree_mod_log_lock itself
539 * while adding entries to the rb tree.
541 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
542 struct extent_buffer *eb) {
543 smp_mb();
544 if (list_empty(&(fs_info)->tree_mod_seq_list))
545 return 1;
546 if (eb && btrfs_header_level(eb) == 0)
547 return 1;
548 return 0;
551 static inline int
552 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
553 struct extent_buffer *eb, int slot,
554 enum mod_log_op op, gfp_t flags)
556 struct tree_mod_elem *tm;
558 tm = kzalloc(sizeof(*tm), flags);
559 if (!tm)
560 return -ENOMEM;
562 tm->index = eb->start >> PAGE_CACHE_SHIFT;
563 if (op != MOD_LOG_KEY_ADD) {
564 btrfs_node_key(eb, &tm->key, slot);
565 tm->blockptr = btrfs_node_blockptr(eb, slot);
567 tm->op = op;
568 tm->slot = slot;
569 tm->generation = btrfs_node_ptr_generation(eb, slot);
571 return __tree_mod_log_insert(fs_info, tm);
574 static noinline int
575 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
576 struct extent_buffer *eb, int slot,
577 enum mod_log_op op, gfp_t flags)
579 if (tree_mod_dont_log(fs_info, eb))
580 return 0;
582 return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
585 static noinline int
586 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
587 struct extent_buffer *eb, int dst_slot, int src_slot,
588 int nr_items, gfp_t flags)
590 struct tree_mod_elem *tm;
591 int ret;
592 int i;
594 if (tree_mod_dont_log(fs_info, eb))
595 return 0;
598 * When we overwrite something during the move, we log these removals.
599 * This can only happen when we move towards the beginning of the
600 * buffer, i.e. dst_slot < src_slot.
602 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
603 ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
604 MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
605 BUG_ON(ret < 0);
608 tm = kzalloc(sizeof(*tm), flags);
609 if (!tm)
610 return -ENOMEM;
612 tm->index = eb->start >> PAGE_CACHE_SHIFT;
613 tm->slot = src_slot;
614 tm->move.dst_slot = dst_slot;
615 tm->move.nr_items = nr_items;
616 tm->op = MOD_LOG_MOVE_KEYS;
618 return __tree_mod_log_insert(fs_info, tm);
621 static inline void
622 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
624 int i;
625 u32 nritems;
626 int ret;
628 if (btrfs_header_level(eb) == 0)
629 return;
631 nritems = btrfs_header_nritems(eb);
632 for (i = nritems - 1; i >= 0; i--) {
633 ret = __tree_mod_log_insert_key(fs_info, eb, i,
634 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
635 BUG_ON(ret < 0);
639 static noinline int
640 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
641 struct extent_buffer *old_root,
642 struct extent_buffer *new_root, gfp_t flags,
643 int log_removal)
645 struct tree_mod_elem *tm;
647 if (tree_mod_dont_log(fs_info, NULL))
648 return 0;
650 if (log_removal)
651 __tree_mod_log_free_eb(fs_info, old_root);
653 tm = kzalloc(sizeof(*tm), flags);
654 if (!tm)
655 return -ENOMEM;
657 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
658 tm->old_root.logical = old_root->start;
659 tm->old_root.level = btrfs_header_level(old_root);
660 tm->generation = btrfs_header_generation(old_root);
661 tm->op = MOD_LOG_ROOT_REPLACE;
663 return __tree_mod_log_insert(fs_info, tm);
666 static struct tree_mod_elem *
667 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
668 int smallest)
670 struct rb_root *tm_root;
671 struct rb_node *node;
672 struct tree_mod_elem *cur = NULL;
673 struct tree_mod_elem *found = NULL;
674 u64 index = start >> PAGE_CACHE_SHIFT;
676 tree_mod_log_read_lock(fs_info);
677 tm_root = &fs_info->tree_mod_log;
678 node = tm_root->rb_node;
679 while (node) {
680 cur = container_of(node, struct tree_mod_elem, node);
681 if (cur->index < index) {
682 node = node->rb_left;
683 } else if (cur->index > index) {
684 node = node->rb_right;
685 } else if (cur->seq < min_seq) {
686 node = node->rb_left;
687 } else if (!smallest) {
688 /* we want the node with the highest seq */
689 if (found)
690 BUG_ON(found->seq > cur->seq);
691 found = cur;
692 node = node->rb_left;
693 } else if (cur->seq > min_seq) {
694 /* we want the node with the smallest seq */
695 if (found)
696 BUG_ON(found->seq < cur->seq);
697 found = cur;
698 node = node->rb_right;
699 } else {
700 found = cur;
701 break;
704 tree_mod_log_read_unlock(fs_info);
706 return found;
710 * this returns the element from the log with the smallest time sequence
711 * value that's in the log (the oldest log item). any element with a time
712 * sequence lower than min_seq will be ignored.
714 static struct tree_mod_elem *
715 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
716 u64 min_seq)
718 return __tree_mod_log_search(fs_info, start, min_seq, 1);
722 * this returns the element from the log with the largest time sequence
723 * value that's in the log (the most recent log item). any element with
724 * a time sequence lower than min_seq will be ignored.
726 static struct tree_mod_elem *
727 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
729 return __tree_mod_log_search(fs_info, start, min_seq, 0);
732 static noinline void
733 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
734 struct extent_buffer *src, unsigned long dst_offset,
735 unsigned long src_offset, int nr_items)
737 int ret;
738 int i;
740 if (tree_mod_dont_log(fs_info, NULL))
741 return;
743 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
744 return;
746 for (i = 0; i < nr_items; i++) {
747 ret = __tree_mod_log_insert_key(fs_info, src,
748 i + src_offset,
749 MOD_LOG_KEY_REMOVE, GFP_NOFS);
750 BUG_ON(ret < 0);
751 ret = __tree_mod_log_insert_key(fs_info, dst,
752 i + dst_offset,
753 MOD_LOG_KEY_ADD,
754 GFP_NOFS);
755 BUG_ON(ret < 0);
759 static inline void
760 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
761 int dst_offset, int src_offset, int nr_items)
763 int ret;
764 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
765 nr_items, GFP_NOFS);
766 BUG_ON(ret < 0);
769 static noinline void
770 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
771 struct extent_buffer *eb, int slot, int atomic)
773 int ret;
775 ret = __tree_mod_log_insert_key(fs_info, eb, slot,
776 MOD_LOG_KEY_REPLACE,
777 atomic ? GFP_ATOMIC : GFP_NOFS);
778 BUG_ON(ret < 0);
781 static noinline void
782 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
784 if (tree_mod_dont_log(fs_info, eb))
785 return;
786 __tree_mod_log_free_eb(fs_info, eb);
789 static noinline void
790 tree_mod_log_set_root_pointer(struct btrfs_root *root,
791 struct extent_buffer *new_root_node,
792 int log_removal)
794 int ret;
795 ret = tree_mod_log_insert_root(root->fs_info, root->node,
796 new_root_node, GFP_NOFS, log_removal);
797 BUG_ON(ret < 0);
801 * check if the tree block can be shared by multiple trees
803 int btrfs_block_can_be_shared(struct btrfs_root *root,
804 struct extent_buffer *buf)
807 * Tree blocks not in reference counted trees and tree roots
808 * are never shared. If a block was allocated after the last
809 * snapshot and the block was not allocated by tree relocation,
810 * we know the block is not shared.
812 if (root->ref_cows &&
813 buf != root->node && buf != root->commit_root &&
814 (btrfs_header_generation(buf) <=
815 btrfs_root_last_snapshot(&root->root_item) ||
816 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
817 return 1;
818 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
819 if (root->ref_cows &&
820 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
821 return 1;
822 #endif
823 return 0;
826 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
827 struct btrfs_root *root,
828 struct extent_buffer *buf,
829 struct extent_buffer *cow,
830 int *last_ref)
832 u64 refs;
833 u64 owner;
834 u64 flags;
835 u64 new_flags = 0;
836 int ret;
839 * Backrefs update rules:
841 * Always use full backrefs for extent pointers in tree block
842 * allocated by tree relocation.
844 * If a shared tree block is no longer referenced by its owner
845 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
846 * use full backrefs for extent pointers in tree block.
848 * If a tree block is being relocated
849 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
850 * use full backrefs for extent pointers in tree block.
851 * The reason for this is that some operations (such as drop tree)
852 * are only allowed on blocks that use full backrefs.
855 if (btrfs_block_can_be_shared(root, buf)) {
856 ret = btrfs_lookup_extent_info(trans, root, buf->start,
857 btrfs_header_level(buf), 1,
858 &refs, &flags);
859 if (ret)
860 return ret;
861 if (refs == 0) {
862 ret = -EROFS;
863 btrfs_std_error(root->fs_info, ret);
864 return ret;
866 } else {
867 refs = 1;
868 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
869 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
870 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
871 else
872 flags = 0;
875 owner = btrfs_header_owner(buf);
876 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
877 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
879 if (refs > 1) {
880 if ((owner == root->root_key.objectid ||
881 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
882 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
883 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
884 BUG_ON(ret); /* -ENOMEM */
886 if (root->root_key.objectid ==
887 BTRFS_TREE_RELOC_OBJECTID) {
888 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
889 BUG_ON(ret); /* -ENOMEM */
890 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
891 BUG_ON(ret); /* -ENOMEM */
893 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
894 } else {
896 if (root->root_key.objectid ==
897 BTRFS_TREE_RELOC_OBJECTID)
898 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
899 else
900 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
901 BUG_ON(ret); /* -ENOMEM */
903 if (new_flags != 0) {
904 int level = btrfs_header_level(buf);
906 ret = btrfs_set_disk_extent_flags(trans, root,
907 buf->start,
908 buf->len,
909 new_flags, level, 0);
910 if (ret)
911 return ret;
913 } else {
914 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
915 if (root->root_key.objectid ==
916 BTRFS_TREE_RELOC_OBJECTID)
917 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
918 else
919 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
920 BUG_ON(ret); /* -ENOMEM */
921 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
922 BUG_ON(ret); /* -ENOMEM */
924 clean_tree_block(trans, root, buf);
925 *last_ref = 1;
927 return 0;
931 * does the dirty work in cow of a single block. The parent block (if
932 * supplied) is updated to point to the new cow copy. The new buffer is marked
933 * dirty and returned locked. If you modify the block it needs to be marked
934 * dirty again.
936 * search_start -- an allocation hint for the new block
938 * empty_size -- a hint that you plan on doing more cow. This is the size in
939 * bytes the allocator should try to find free next to the block it returns.
940 * This is just a hint and may be ignored by the allocator.
942 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
943 struct btrfs_root *root,
944 struct extent_buffer *buf,
945 struct extent_buffer *parent, int parent_slot,
946 struct extent_buffer **cow_ret,
947 u64 search_start, u64 empty_size)
949 struct btrfs_disk_key disk_key;
950 struct extent_buffer *cow;
951 int level, ret;
952 int last_ref = 0;
953 int unlock_orig = 0;
954 u64 parent_start;
956 if (*cow_ret == buf)
957 unlock_orig = 1;
959 btrfs_assert_tree_locked(buf);
961 WARN_ON(root->ref_cows && trans->transid !=
962 root->fs_info->running_transaction->transid);
963 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
965 level = btrfs_header_level(buf);
967 if (level == 0)
968 btrfs_item_key(buf, &disk_key, 0);
969 else
970 btrfs_node_key(buf, &disk_key, 0);
972 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
973 if (parent)
974 parent_start = parent->start;
975 else
976 parent_start = 0;
977 } else
978 parent_start = 0;
980 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
981 root->root_key.objectid, &disk_key,
982 level, search_start, empty_size);
983 if (IS_ERR(cow))
984 return PTR_ERR(cow);
986 /* cow is set to blocking by btrfs_init_new_buffer */
988 copy_extent_buffer(cow, buf, 0, 0, cow->len);
989 btrfs_set_header_bytenr(cow, cow->start);
990 btrfs_set_header_generation(cow, trans->transid);
991 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
992 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
993 BTRFS_HEADER_FLAG_RELOC);
994 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
995 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
996 else
997 btrfs_set_header_owner(cow, root->root_key.objectid);
999 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1000 BTRFS_FSID_SIZE);
1002 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1003 if (ret) {
1004 btrfs_abort_transaction(trans, root, ret);
1005 return ret;
1008 if (root->ref_cows) {
1009 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1010 if (ret)
1011 return ret;
1014 if (buf == root->node) {
1015 WARN_ON(parent && parent != buf);
1016 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1017 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1018 parent_start = buf->start;
1019 else
1020 parent_start = 0;
1022 extent_buffer_get(cow);
1023 tree_mod_log_set_root_pointer(root, cow, 1);
1024 rcu_assign_pointer(root->node, cow);
1026 btrfs_free_tree_block(trans, root, buf, parent_start,
1027 last_ref);
1028 free_extent_buffer(buf);
1029 add_root_to_dirty_list(root);
1030 } else {
1031 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1032 parent_start = parent->start;
1033 else
1034 parent_start = 0;
1036 WARN_ON(trans->transid != btrfs_header_generation(parent));
1037 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1038 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1039 btrfs_set_node_blockptr(parent, parent_slot,
1040 cow->start);
1041 btrfs_set_node_ptr_generation(parent, parent_slot,
1042 trans->transid);
1043 btrfs_mark_buffer_dirty(parent);
1044 if (last_ref)
1045 tree_mod_log_free_eb(root->fs_info, buf);
1046 btrfs_free_tree_block(trans, root, buf, parent_start,
1047 last_ref);
1049 if (unlock_orig)
1050 btrfs_tree_unlock(buf);
1051 free_extent_buffer_stale(buf);
1052 btrfs_mark_buffer_dirty(cow);
1053 *cow_ret = cow;
1054 return 0;
1058 * returns the logical address of the oldest predecessor of the given root.
1059 * entries older than time_seq are ignored.
1061 static struct tree_mod_elem *
1062 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1063 struct extent_buffer *eb_root, u64 time_seq)
1065 struct tree_mod_elem *tm;
1066 struct tree_mod_elem *found = NULL;
1067 u64 root_logical = eb_root->start;
1068 int looped = 0;
1070 if (!time_seq)
1071 return NULL;
1074 * the very last operation that's logged for a root is the replacement
1075 * operation (if it is replaced at all). this has the index of the *new*
1076 * root, making it the very first operation that's logged for this root.
1078 while (1) {
1079 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1080 time_seq);
1081 if (!looped && !tm)
1082 return NULL;
1084 * if there are no tree operations for the oldest root, we simply
1085 * return it. this should only happen if that (old) root is at
1086 * level 0.
1088 if (!tm)
1089 break;
1092 * if there's an operation that's not a root replacement, we
1093 * found the oldest version of our root. normally, we'll find a
1094 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1096 if (tm->op != MOD_LOG_ROOT_REPLACE)
1097 break;
1099 found = tm;
1100 root_logical = tm->old_root.logical;
1101 looped = 1;
1104 /* if there's no old root to return, return what we found instead */
1105 if (!found)
1106 found = tm;
1108 return found;
1112 * tm is a pointer to the first operation to rewind within eb. then, all
1113 * previous operations will be rewound (until we reach something older than
1114 * time_seq).
1116 static void
1117 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1118 u64 time_seq, struct tree_mod_elem *first_tm)
1120 u32 n;
1121 struct rb_node *next;
1122 struct tree_mod_elem *tm = first_tm;
1123 unsigned long o_dst;
1124 unsigned long o_src;
1125 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1127 n = btrfs_header_nritems(eb);
1128 tree_mod_log_read_lock(fs_info);
1129 while (tm && tm->seq >= time_seq) {
1131 * all the operations are recorded with the operator used for
1132 * the modification. as we're going backwards, we do the
1133 * opposite of each operation here.
1135 switch (tm->op) {
1136 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1137 BUG_ON(tm->slot < n);
1138 /* Fallthrough */
1139 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1140 case MOD_LOG_KEY_REMOVE:
1141 btrfs_set_node_key(eb, &tm->key, tm->slot);
1142 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1143 btrfs_set_node_ptr_generation(eb, tm->slot,
1144 tm->generation);
1145 n++;
1146 break;
1147 case MOD_LOG_KEY_REPLACE:
1148 BUG_ON(tm->slot >= n);
1149 btrfs_set_node_key(eb, &tm->key, tm->slot);
1150 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1151 btrfs_set_node_ptr_generation(eb, tm->slot,
1152 tm->generation);
1153 break;
1154 case MOD_LOG_KEY_ADD:
1155 /* if a move operation is needed it's in the log */
1156 n--;
1157 break;
1158 case MOD_LOG_MOVE_KEYS:
1159 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1160 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1161 memmove_extent_buffer(eb, o_dst, o_src,
1162 tm->move.nr_items * p_size);
1163 break;
1164 case MOD_LOG_ROOT_REPLACE:
1166 * this operation is special. for roots, this must be
1167 * handled explicitly before rewinding.
1168 * for non-roots, this operation may exist if the node
1169 * was a root: root A -> child B; then A gets empty and
1170 * B is promoted to the new root. in the mod log, we'll
1171 * have a root-replace operation for B, a tree block
1172 * that is not a root. we simply ignore that operation.
1174 break;
1176 next = rb_next(&tm->node);
1177 if (!next)
1178 break;
1179 tm = container_of(next, struct tree_mod_elem, node);
1180 if (tm->index != first_tm->index)
1181 break;
1183 tree_mod_log_read_unlock(fs_info);
1184 btrfs_set_header_nritems(eb, n);
1188 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1189 * is returned. If rewind operations happen, a fresh buffer is returned. The
1190 * returned buffer is always read-locked. If the returned buffer is not the
1191 * input buffer, the lock on the input buffer is released and the input buffer
1192 * is freed (its refcount is decremented).
1194 static struct extent_buffer *
1195 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1196 struct extent_buffer *eb, u64 time_seq)
1198 struct extent_buffer *eb_rewin;
1199 struct tree_mod_elem *tm;
1201 if (!time_seq)
1202 return eb;
1204 if (btrfs_header_level(eb) == 0)
1205 return eb;
1207 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1208 if (!tm)
1209 return eb;
1211 btrfs_set_path_blocking(path);
1212 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1214 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1215 BUG_ON(tm->slot != 0);
1216 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1217 fs_info->tree_root->nodesize);
1218 if (!eb_rewin) {
1219 btrfs_tree_read_unlock_blocking(eb);
1220 free_extent_buffer(eb);
1221 return NULL;
1223 btrfs_set_header_bytenr(eb_rewin, eb->start);
1224 btrfs_set_header_backref_rev(eb_rewin,
1225 btrfs_header_backref_rev(eb));
1226 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1227 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1228 } else {
1229 eb_rewin = btrfs_clone_extent_buffer(eb);
1230 if (!eb_rewin) {
1231 btrfs_tree_read_unlock_blocking(eb);
1232 free_extent_buffer(eb);
1233 return NULL;
1237 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1238 btrfs_tree_read_unlock_blocking(eb);
1239 free_extent_buffer(eb);
1241 extent_buffer_get(eb_rewin);
1242 btrfs_tree_read_lock(eb_rewin);
1243 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1244 WARN_ON(btrfs_header_nritems(eb_rewin) >
1245 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1247 return eb_rewin;
1251 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1252 * value. If there are no changes, the current root->root_node is returned. If
1253 * anything changed in between, there's a fresh buffer allocated on which the
1254 * rewind operations are done. In any case, the returned buffer is read locked.
1255 * Returns NULL on error (with no locks held).
1257 static inline struct extent_buffer *
1258 get_old_root(struct btrfs_root *root, u64 time_seq)
1260 struct tree_mod_elem *tm;
1261 struct extent_buffer *eb = NULL;
1262 struct extent_buffer *eb_root;
1263 struct extent_buffer *old;
1264 struct tree_mod_root *old_root = NULL;
1265 u64 old_generation = 0;
1266 u64 logical;
1267 u32 blocksize;
1269 eb_root = btrfs_read_lock_root_node(root);
1270 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1271 if (!tm)
1272 return eb_root;
1274 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1275 old_root = &tm->old_root;
1276 old_generation = tm->generation;
1277 logical = old_root->logical;
1278 } else {
1279 logical = eb_root->start;
1282 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1283 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1284 btrfs_tree_read_unlock(eb_root);
1285 free_extent_buffer(eb_root);
1286 blocksize = btrfs_level_size(root, old_root->level);
1287 old = read_tree_block(root, logical, blocksize, 0);
1288 if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
1289 free_extent_buffer(old);
1290 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1291 logical);
1292 } else {
1293 eb = btrfs_clone_extent_buffer(old);
1294 free_extent_buffer(old);
1296 } else if (old_root) {
1297 btrfs_tree_read_unlock(eb_root);
1298 free_extent_buffer(eb_root);
1299 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1300 } else {
1301 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1302 eb = btrfs_clone_extent_buffer(eb_root);
1303 btrfs_tree_read_unlock_blocking(eb_root);
1304 free_extent_buffer(eb_root);
1307 if (!eb)
1308 return NULL;
1309 extent_buffer_get(eb);
1310 btrfs_tree_read_lock(eb);
1311 if (old_root) {
1312 btrfs_set_header_bytenr(eb, eb->start);
1313 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1314 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1315 btrfs_set_header_level(eb, old_root->level);
1316 btrfs_set_header_generation(eb, old_generation);
1318 if (tm)
1319 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1320 else
1321 WARN_ON(btrfs_header_level(eb) != 0);
1322 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1324 return eb;
1327 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1329 struct tree_mod_elem *tm;
1330 int level;
1331 struct extent_buffer *eb_root = btrfs_root_node(root);
1333 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1334 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1335 level = tm->old_root.level;
1336 } else {
1337 level = btrfs_header_level(eb_root);
1339 free_extent_buffer(eb_root);
1341 return level;
1344 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1345 struct btrfs_root *root,
1346 struct extent_buffer *buf)
1348 /* ensure we can see the force_cow */
1349 smp_rmb();
1352 * We do not need to cow a block if
1353 * 1) this block is not created or changed in this transaction;
1354 * 2) this block does not belong to TREE_RELOC tree;
1355 * 3) the root is not forced COW.
1357 * What is forced COW:
1358 * when we create a snapshot during committing the transaction,
1359 * after we've finished copying the src root, we must COW the shared
1360 * block to ensure metadata consistency.
1362 if (btrfs_header_generation(buf) == trans->transid &&
1363 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1364 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1365 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1366 !root->force_cow)
1367 return 0;
1368 return 1;
1372 * cows a single block, see __btrfs_cow_block for the real work.
1373 * This version of it has extra checks so that a block isn't cow'd more than
1374 * once per transaction, as long as it hasn't been written yet
1376 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1377 struct btrfs_root *root, struct extent_buffer *buf,
1378 struct extent_buffer *parent, int parent_slot,
1379 struct extent_buffer **cow_ret)
1381 u64 search_start;
1382 int ret;
1384 if (trans->transaction != root->fs_info->running_transaction)
1385 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1386 trans->transid,
1387 root->fs_info->running_transaction->transid);
1389 if (trans->transid != root->fs_info->generation)
1390 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1391 trans->transid, root->fs_info->generation);
1393 if (!should_cow_block(trans, root, buf)) {
1394 *cow_ret = buf;
1395 return 0;
1398 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1400 if (parent)
1401 btrfs_set_lock_blocking(parent);
1402 btrfs_set_lock_blocking(buf);
1404 ret = __btrfs_cow_block(trans, root, buf, parent,
1405 parent_slot, cow_ret, search_start, 0);
1407 trace_btrfs_cow_block(root, buf, *cow_ret);
1409 return ret;
1413 * helper function for defrag to decide if two blocks pointed to by a
1414 * node are actually close by
1416 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1418 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1419 return 1;
1420 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1421 return 1;
1422 return 0;
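/*
 * Example: with a 4K blocksize, close_blocks(100 * 4096, 104 * 4096, 4096)
 * returns 1, since the gap between the two blocks is 3 * 4096 = 12288
 * bytes, well under the 32768 byte threshold used above.
 */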
1426 * compare two keys in a memcmp fashion
1428 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1430 struct btrfs_key k1;
1432 btrfs_disk_key_to_cpu(&k1, disk);
1434 return btrfs_comp_cpu_keys(&k1, k2);
1438 * same as comp_keys only with two btrfs_key's
1440 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1442 if (k1->objectid > k2->objectid)
1443 return 1;
1444 if (k1->objectid < k2->objectid)
1445 return -1;
1446 if (k1->type > k2->type)
1447 return 1;
1448 if (k1->type < k2->type)
1449 return -1;
1450 if (k1->offset > k2->offset)
1451 return 1;
1452 if (k1->offset < k2->offset)
1453 return -1;
1454 return 0;
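/*
 * Example of the resulting ordering: keys compare lexicographically on
 * (objectid, type, offset), so {256, 1, 0} < {256, 1, 4096} < {257, 0, 0}.
 */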
1458 * this is used by the defrag code to go through all the
1459 * leaves pointed to by a node and reallocate them so that
1460 * disk order is close to key order
1462 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1463 struct btrfs_root *root, struct extent_buffer *parent,
1464 int start_slot, u64 *last_ret,
1465 struct btrfs_key *progress)
1467 struct extent_buffer *cur;
1468 u64 blocknr;
1469 u64 gen;
1470 u64 search_start = *last_ret;
1471 u64 last_block = 0;
1472 u64 other;
1473 u32 parent_nritems;
1474 int end_slot;
1475 int i;
1476 int err = 0;
1477 int parent_level;
1478 int uptodate;
1479 u32 blocksize;
1480 int progress_passed = 0;
1481 struct btrfs_disk_key disk_key;
1483 parent_level = btrfs_header_level(parent);
1485 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1486 WARN_ON(trans->transid != root->fs_info->generation);
1488 parent_nritems = btrfs_header_nritems(parent);
1489 blocksize = btrfs_level_size(root, parent_level - 1);
1490 end_slot = parent_nritems;
1492 if (parent_nritems == 1)
1493 return 0;
1495 btrfs_set_lock_blocking(parent);
1497 for (i = start_slot; i < end_slot; i++) {
1498 int close = 1;
1500 btrfs_node_key(parent, &disk_key, i);
1501 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1502 continue;
1504 progress_passed = 1;
1505 blocknr = btrfs_node_blockptr(parent, i);
1506 gen = btrfs_node_ptr_generation(parent, i);
1507 if (last_block == 0)
1508 last_block = blocknr;
1510 if (i > 0) {
1511 other = btrfs_node_blockptr(parent, i - 1);
1512 close = close_blocks(blocknr, other, blocksize);
1514 if (!close && i < end_slot - 2) {
1515 other = btrfs_node_blockptr(parent, i + 1);
1516 close = close_blocks(blocknr, other, blocksize);
1518 if (close) {
1519 last_block = blocknr;
1520 continue;
1523 cur = btrfs_find_tree_block(root, blocknr, blocksize);
1524 if (cur)
1525 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1526 else
1527 uptodate = 0;
1528 if (!cur || !uptodate) {
1529 if (!cur) {
1530 cur = read_tree_block(root, blocknr,
1531 blocksize, gen);
1532 if (!cur || !extent_buffer_uptodate(cur)) {
1533 free_extent_buffer(cur);
1534 return -EIO;
1536 } else if (!uptodate) {
1537 err = btrfs_read_buffer(cur, gen);
1538 if (err) {
1539 free_extent_buffer(cur);
1540 return err;
1544 if (search_start == 0)
1545 search_start = last_block;
1547 btrfs_tree_lock(cur);
1548 btrfs_set_lock_blocking(cur);
1549 err = __btrfs_cow_block(trans, root, cur, parent, i,
1550 &cur, search_start,
1551 min(16 * blocksize,
1552 (end_slot - i) * blocksize));
1553 if (err) {
1554 btrfs_tree_unlock(cur);
1555 free_extent_buffer(cur);
1556 break;
1558 search_start = cur->start;
1559 last_block = cur->start;
1560 *last_ret = search_start;
1561 btrfs_tree_unlock(cur);
1562 free_extent_buffer(cur);
1564 return err;
1568 * The leaf data grows from end-to-front in the node.
1569 * this returns the address of the start of the last item,
1570 * which is where the leaf data stack stops
1572 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1573 struct extent_buffer *leaf)
1575 u32 nr = btrfs_header_nritems(leaf);
1576 if (nr == 0)
1577 return BTRFS_LEAF_DATA_SIZE(root);
1578 return btrfs_item_offset_nr(leaf, nr - 1);
1583 * search for key in the extent_buffer. The items start at offset p,
1584 * and they are item_size apart. There are 'max' items in p.
1586 * the slot in the array is returned via slot, and it points to
1587 * the place where you would insert key if it is not found in
1588 * the array.
1590 * slot may point to max if the key is bigger than all of the keys
1592 static noinline int generic_bin_search(struct extent_buffer *eb,
1593 unsigned long p,
1594 int item_size, struct btrfs_key *key,
1595 int max, int *slot)
1597 int low = 0;
1598 int high = max;
1599 int mid;
1600 int ret;
1601 struct btrfs_disk_key *tmp = NULL;
1602 struct btrfs_disk_key unaligned;
1603 unsigned long offset;
1604 char *kaddr = NULL;
1605 unsigned long map_start = 0;
1606 unsigned long map_len = 0;
1607 int err;
1609 while (low < high) {
1610 mid = (low + high) / 2;
1611 offset = p + mid * item_size;
1613 if (!kaddr || offset < map_start ||
1614 (offset + sizeof(struct btrfs_disk_key)) >
1615 map_start + map_len) {
1617 err = map_private_extent_buffer(eb, offset,
1618 sizeof(struct btrfs_disk_key),
1619 &kaddr, &map_start, &map_len);
1621 if (!err) {
1622 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1623 map_start);
1624 } else {
1625 read_extent_buffer(eb, &unaligned,
1626 offset, sizeof(unaligned));
1627 tmp = &unaligned;
1630 } else {
1631 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1632 map_start);
1634 ret = comp_keys(tmp, key);
1636 if (ret < 0)
1637 low = mid + 1;
1638 else if (ret > 0)
1639 high = mid;
1640 else {
1641 *slot = mid;
1642 return 0;
1645 *slot = low;
1646 return 1;
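/*
 * Example: on an exact match generic_bin_search() returns 0 with *slot set
 * to the matching index; otherwise it returns 1 with *slot set to the
 * insertion point (which may equal max). Searching for 5 among the keys
 * {2, 4, 8} returns 1 with *slot == 2.
 */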
1650 * simple bin_search frontend that does the right thing for
1651 * leaves vs nodes
1653 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1654 int level, int *slot)
1656 if (level == 0)
1657 return generic_bin_search(eb,
1658 offsetof(struct btrfs_leaf, items),
1659 sizeof(struct btrfs_item),
1660 key, btrfs_header_nritems(eb),
1661 slot);
1662 else
1663 return generic_bin_search(eb,
1664 offsetof(struct btrfs_node, ptrs),
1665 sizeof(struct btrfs_key_ptr),
1666 key, btrfs_header_nritems(eb),
1667 slot);
1670 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1671 int level, int *slot)
1673 return bin_search(eb, key, level, slot);
1676 static void root_add_used(struct btrfs_root *root, u32 size)
1678 spin_lock(&root->accounting_lock);
1679 btrfs_set_root_used(&root->root_item,
1680 btrfs_root_used(&root->root_item) + size);
1681 spin_unlock(&root->accounting_lock);
1684 static void root_sub_used(struct btrfs_root *root, u32 size)
1686 spin_lock(&root->accounting_lock);
1687 btrfs_set_root_used(&root->root_item,
1688 btrfs_root_used(&root->root_item) - size);
1689 spin_unlock(&root->accounting_lock);
1692 /* given a node and slot number, this reads the block it points to. The
1693 * extent buffer is returned with a reference taken (but unlocked).
1694 * NULL is returned on error.
1696 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1697 struct extent_buffer *parent, int slot)
1699 int level = btrfs_header_level(parent);
1700 struct extent_buffer *eb;
1702 if (slot < 0)
1703 return NULL;
1704 if (slot >= btrfs_header_nritems(parent))
1705 return NULL;
1707 BUG_ON(level == 0);
1709 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1710 btrfs_level_size(root, level - 1),
1711 btrfs_node_ptr_generation(parent, slot));
1712 if (eb && !extent_buffer_uptodate(eb)) {
1713 free_extent_buffer(eb);
1714 eb = NULL;
1717 return eb;
1721 * node level balancing, used to make sure nodes are in proper order for
1722 * item deletion. We balance from the top down, so we have to make sure
1723 * that a deletion won't leave a node completely empty later on.
1725 static noinline int balance_level(struct btrfs_trans_handle *trans,
1726 struct btrfs_root *root,
1727 struct btrfs_path *path, int level)
1729 struct extent_buffer *right = NULL;
1730 struct extent_buffer *mid;
1731 struct extent_buffer *left = NULL;
1732 struct extent_buffer *parent = NULL;
1733 int ret = 0;
1734 int wret;
1735 int pslot;
1736 int orig_slot = path->slots[level];
1737 u64 orig_ptr;
1739 if (level == 0)
1740 return 0;
1742 mid = path->nodes[level];
1744 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1745 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1746 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1748 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1750 if (level < BTRFS_MAX_LEVEL - 1) {
1751 parent = path->nodes[level + 1];
1752 pslot = path->slots[level + 1];
1756 * deal with the case where there is only one pointer in the root
1757 * by promoting the node below to a root
1759 if (!parent) {
1760 struct extent_buffer *child;
1762 if (btrfs_header_nritems(mid) != 1)
1763 return 0;
1765 /* promote the child to a root */
1766 child = read_node_slot(root, mid, 0);
1767 if (!child) {
1768 ret = -EROFS;
1769 btrfs_std_error(root->fs_info, ret);
1770 goto enospc;
1773 btrfs_tree_lock(child);
1774 btrfs_set_lock_blocking(child);
1775 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1776 if (ret) {
1777 btrfs_tree_unlock(child);
1778 free_extent_buffer(child);
1779 goto enospc;
1782 tree_mod_log_set_root_pointer(root, child, 1);
1783 rcu_assign_pointer(root->node, child);
1785 add_root_to_dirty_list(root);
1786 btrfs_tree_unlock(child);
1788 path->locks[level] = 0;
1789 path->nodes[level] = NULL;
1790 clean_tree_block(trans, root, mid);
1791 btrfs_tree_unlock(mid);
1792 /* once for the path */
1793 free_extent_buffer(mid);
1795 root_sub_used(root, mid->len);
1796 btrfs_free_tree_block(trans, root, mid, 0, 1);
1797 /* once for the root ptr */
1798 free_extent_buffer_stale(mid);
1799 return 0;
1801 if (btrfs_header_nritems(mid) >
1802 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1803 return 0;
1805 left = read_node_slot(root, parent, pslot - 1);
1806 if (left) {
1807 btrfs_tree_lock(left);
1808 btrfs_set_lock_blocking(left);
1809 wret = btrfs_cow_block(trans, root, left,
1810 parent, pslot - 1, &left);
1811 if (wret) {
1812 ret = wret;
1813 goto enospc;
1816 right = read_node_slot(root, parent, pslot + 1);
1817 if (right) {
1818 btrfs_tree_lock(right);
1819 btrfs_set_lock_blocking(right);
1820 wret = btrfs_cow_block(trans, root, right,
1821 parent, pslot + 1, &right);
1822 if (wret) {
1823 ret = wret;
1824 goto enospc;
1828 /* first, try to make some room in the middle buffer */
1829 if (left) {
1830 orig_slot += btrfs_header_nritems(left);
1831 wret = push_node_left(trans, root, left, mid, 1);
1832 if (wret < 0)
1833 ret = wret;
1837 * then try to empty the right most buffer into the middle
1839 if (right) {
1840 wret = push_node_left(trans, root, mid, right, 1);
1841 if (wret < 0 && wret != -ENOSPC)
1842 ret = wret;
1843 if (btrfs_header_nritems(right) == 0) {
1844 clean_tree_block(trans, root, right);
1845 btrfs_tree_unlock(right);
1846 del_ptr(root, path, level + 1, pslot + 1);
1847 root_sub_used(root, right->len);
1848 btrfs_free_tree_block(trans, root, right, 0, 1);
1849 free_extent_buffer_stale(right);
1850 right = NULL;
1851 } else {
1852 struct btrfs_disk_key right_key;
1853 btrfs_node_key(right, &right_key, 0);
1854 tree_mod_log_set_node_key(root->fs_info, parent,
1855 pslot + 1, 0);
1856 btrfs_set_node_key(parent, &right_key, pslot + 1);
1857 btrfs_mark_buffer_dirty(parent);
1860 if (btrfs_header_nritems(mid) == 1) {
1862 * we're not allowed to leave a node with one item in the
1863 * tree during a delete. A deletion from lower in the tree
1864 * could try to delete the only pointer in this node.
1865 * So, pull some keys from the left.
1866 * There has to be a left pointer at this point because
1867 * otherwise we would have pulled some pointers from the
1868 * right
1870 if (!left) {
1871 ret = -EROFS;
1872 btrfs_std_error(root->fs_info, ret);
1873 goto enospc;
1875 wret = balance_node_right(trans, root, mid, left);
1876 if (wret < 0) {
1877 ret = wret;
1878 goto enospc;
1880 if (wret == 1) {
1881 wret = push_node_left(trans, root, left, mid, 1);
1882 if (wret < 0)
1883 ret = wret;
1885 BUG_ON(wret == 1);
1887 if (btrfs_header_nritems(mid) == 0) {
1888 clean_tree_block(trans, root, mid);
1889 btrfs_tree_unlock(mid);
1890 del_ptr(root, path, level + 1, pslot);
1891 root_sub_used(root, mid->len);
1892 btrfs_free_tree_block(trans, root, mid, 0, 1);
1893 free_extent_buffer_stale(mid);
1894 mid = NULL;
1895 } else {
1896 /* update the parent key to reflect our changes */
1897 struct btrfs_disk_key mid_key;
1898 btrfs_node_key(mid, &mid_key, 0);
1899 tree_mod_log_set_node_key(root->fs_info, parent,
1900 pslot, 0);
1901 btrfs_set_node_key(parent, &mid_key, pslot);
1902 btrfs_mark_buffer_dirty(parent);
1905 /* update the path */
1906 if (left) {
1907 if (btrfs_header_nritems(left) > orig_slot) {
1908 extent_buffer_get(left);
1909 /* left was locked after cow */
1910 path->nodes[level] = left;
1911 path->slots[level + 1] -= 1;
1912 path->slots[level] = orig_slot;
1913 if (mid) {
1914 btrfs_tree_unlock(mid);
1915 free_extent_buffer(mid);
1917 } else {
1918 orig_slot -= btrfs_header_nritems(left);
1919 path->slots[level] = orig_slot;
1922 /* double check we haven't messed things up */
1923 if (orig_ptr !=
1924 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1925 BUG();
1926 enospc:
1927 if (right) {
1928 btrfs_tree_unlock(right);
1929 free_extent_buffer(right);
1931 if (left) {
1932 if (path->nodes[level] != left)
1933 btrfs_tree_unlock(left);
1934 free_extent_buffer(left);
1936 return ret;
1939 /* Node balancing for insertion. Here we only split or push nodes around
1940 * when they are completely full. This is also done top down, so we
1941 * have to be pessimistic.
1943 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1944 struct btrfs_root *root,
1945 struct btrfs_path *path, int level)
1947 struct extent_buffer *right = NULL;
1948 struct extent_buffer *mid;
1949 struct extent_buffer *left = NULL;
1950 struct extent_buffer *parent = NULL;
1951 int ret = 0;
1952 int wret;
1953 int pslot;
1954 int orig_slot = path->slots[level];
1956 if (level == 0)
1957 return 1;
1959 mid = path->nodes[level];
1960 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1962 if (level < BTRFS_MAX_LEVEL - 1) {
1963 parent = path->nodes[level + 1];
1964 pslot = path->slots[level + 1];
1967 if (!parent)
1968 return 1;
1970 left = read_node_slot(root, parent, pslot - 1);
1972 /* first, try to make some room in the middle buffer */
1973 if (left) {
1974 u32 left_nr;
1976 btrfs_tree_lock(left);
1977 btrfs_set_lock_blocking(left);
1979 left_nr = btrfs_header_nritems(left);
1980 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1981 wret = 1;
1982 } else {
1983 ret = btrfs_cow_block(trans, root, left, parent,
1984 pslot - 1, &left);
1985 if (ret)
1986 wret = 1;
1987 else {
1988 wret = push_node_left(trans, root,
1989 left, mid, 0);
1992 if (wret < 0)
1993 ret = wret;
1994 if (wret == 0) {
1995 struct btrfs_disk_key disk_key;
1996 orig_slot += left_nr;
1997 btrfs_node_key(mid, &disk_key, 0);
1998 tree_mod_log_set_node_key(root->fs_info, parent,
1999 pslot, 0);
2000 btrfs_set_node_key(parent, &disk_key, pslot);
2001 btrfs_mark_buffer_dirty(parent);
2002 if (btrfs_header_nritems(left) > orig_slot) {
2003 path->nodes[level] = left;
2004 path->slots[level + 1] -= 1;
2005 path->slots[level] = orig_slot;
2006 btrfs_tree_unlock(mid);
2007 free_extent_buffer(mid);
2008 } else {
2009 orig_slot -=
2010 btrfs_header_nritems(left);
2011 path->slots[level] = orig_slot;
2012 btrfs_tree_unlock(left);
2013 free_extent_buffer(left);
2015 return 0;
2017 btrfs_tree_unlock(left);
2018 free_extent_buffer(left);
2020 right = read_node_slot(root, parent, pslot + 1);
2023 * then try to empty the rightmost buffer into the middle
2025 if (right) {
2026 u32 right_nr;
2028 btrfs_tree_lock(right);
2029 btrfs_set_lock_blocking(right);
2031 right_nr = btrfs_header_nritems(right);
2032 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2033 wret = 1;
2034 } else {
2035 ret = btrfs_cow_block(trans, root, right,
2036 parent, pslot + 1,
2037 &right);
2038 if (ret)
2039 wret = 1;
2040 else {
2041 wret = balance_node_right(trans, root,
2042 right, mid);
2045 if (wret < 0)
2046 ret = wret;
2047 if (wret == 0) {
2048 struct btrfs_disk_key disk_key;
2050 btrfs_node_key(right, &disk_key, 0);
2051 tree_mod_log_set_node_key(root->fs_info, parent,
2052 pslot + 1, 0);
2053 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2054 btrfs_mark_buffer_dirty(parent);
2056 if (btrfs_header_nritems(mid) <= orig_slot) {
2057 path->nodes[level] = right;
2058 path->slots[level + 1] += 1;
2059 path->slots[level] = orig_slot -
2060 btrfs_header_nritems(mid);
2061 btrfs_tree_unlock(mid);
2062 free_extent_buffer(mid);
2063 } else {
2064 btrfs_tree_unlock(right);
2065 free_extent_buffer(right);
2067 return 0;
2069 btrfs_tree_unlock(right);
2070 free_extent_buffer(right);
2072 return 1;
2076 * readahead one full node of leaves, finding things that are close
2077 * to the block in 'slot', and triggering ra on them.
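 *
 * Only level 1 nodes are considered.  The loop below walks neighbouring
 * slots in the direction given by path->reada and issues readahead for
 * blocks within 64KiB of the target, stopping once 64KiB has been queued
 * or 32 slots have been scanned.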
2079 static void reada_for_search(struct btrfs_root *root,
2080 struct btrfs_path *path,
2081 int level, int slot, u64 objectid)
2083 struct extent_buffer *node;
2084 struct btrfs_disk_key disk_key;
2085 u32 nritems;
2086 u64 search;
2087 u64 target;
2088 u64 nread = 0;
2089 u64 gen;
2090 int direction = path->reada;
2091 struct extent_buffer *eb;
2092 u32 nr;
2093 u32 blocksize;
2094 u32 nscan = 0;
2096 if (level != 1)
2097 return;
2099 if (!path->nodes[level])
2100 return;
2102 node = path->nodes[level];
2104 search = btrfs_node_blockptr(node, slot);
2105 blocksize = btrfs_level_size(root, level - 1);
2106 eb = btrfs_find_tree_block(root, search, blocksize);
2107 if (eb) {
2108 free_extent_buffer(eb);
2109 return;
2112 target = search;
2114 nritems = btrfs_header_nritems(node);
2115 nr = slot;
2117 while (1) {
2118 if (direction < 0) {
2119 if (nr == 0)
2120 break;
2121 nr--;
2122 } else if (direction > 0) {
2123 nr++;
2124 if (nr >= nritems)
2125 break;
2127 if (path->reada < 0 && objectid) {
2128 btrfs_node_key(node, &disk_key, nr);
2129 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2130 break;
2132 search = btrfs_node_blockptr(node, nr);
2133 if ((search <= target && target - search <= 65536) ||
2134 (search > target && search - target <= 65536)) {
2135 gen = btrfs_node_ptr_generation(node, nr);
2136 readahead_tree_block(root, search, blocksize, gen);
2137 nread += blocksize;
2139 nscan++;
2140 if ((nread > 65536 || nscan > 32))
2141 break;
2145 static noinline void reada_for_balance(struct btrfs_root *root,
2146 struct btrfs_path *path, int level)
2148 int slot;
2149 int nritems;
2150 struct extent_buffer *parent;
2151 struct extent_buffer *eb;
2152 u64 gen;
2153 u64 block1 = 0;
2154 u64 block2 = 0;
2155 int blocksize;
2157 parent = path->nodes[level + 1];
2158 if (!parent)
2159 return;
2161 nritems = btrfs_header_nritems(parent);
2162 slot = path->slots[level + 1];
2163 blocksize = btrfs_level_size(root, level);
2165 if (slot > 0) {
2166 block1 = btrfs_node_blockptr(parent, slot - 1);
2167 gen = btrfs_node_ptr_generation(parent, slot - 1);
2168 eb = btrfs_find_tree_block(root, block1, blocksize);
2170 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2171 * don't want to return -EAGAIN here. That will loop
2172 * forever
2174 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2175 block1 = 0;
2176 free_extent_buffer(eb);
2178 if (slot + 1 < nritems) {
2179 block2 = btrfs_node_blockptr(parent, slot + 1);
2180 gen = btrfs_node_ptr_generation(parent, slot + 1);
2181 eb = btrfs_find_tree_block(root, block2, blocksize);
2182 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2183 block2 = 0;
2184 free_extent_buffer(eb);
2187 if (block1)
2188 readahead_tree_block(root, block1, blocksize, 0);
2189 if (block2)
2190 readahead_tree_block(root, block2, blocksize, 0);
2195 * when we walk down the tree, it is usually safe to unlock the higher layers
2196 * in the tree. The exceptions are when our path goes through slot 0, because
2197 * operations on the tree might require changing key pointers higher up in the
2198 * tree.
2200 * callers might also have set path->keep_locks, which tells this code to keep
2201 * the lock if the path points to the last slot in the block. This is part of
2202 * walking through the tree, and selecting the next slot in the higher block.
2204 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
2205 * if lowest_unlock is 1, level 0 won't be unlocked
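 *
 * For example, when the path goes through slot 0 of a node, the parent
 * stays locked even above lowest_unlock, since a later key change in
 * that node has to be propagated into the parent's pointer.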
2207 static noinline void unlock_up(struct btrfs_path *path, int level,
2208 int lowest_unlock, int min_write_lock_level,
2209 int *write_lock_level)
2211 int i;
2212 int skip_level = level;
2213 int no_skips = 0;
2214 struct extent_buffer *t;
2216 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2217 if (!path->nodes[i])
2218 break;
2219 if (!path->locks[i])
2220 break;
2221 if (!no_skips && path->slots[i] == 0) {
2222 skip_level = i + 1;
2223 continue;
2225 if (!no_skips && path->keep_locks) {
2226 u32 nritems;
2227 t = path->nodes[i];
2228 nritems = btrfs_header_nritems(t);
2229 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2230 skip_level = i + 1;
2231 continue;
2234 if (skip_level < i && i >= lowest_unlock)
2235 no_skips = 1;
2237 t = path->nodes[i];
2238 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2239 btrfs_tree_unlock_rw(t, path->locks[i]);
2240 path->locks[i] = 0;
2241 if (write_lock_level &&
2242 i > min_write_lock_level &&
2243 i <= *write_lock_level) {
2244 *write_lock_level = i - 1;
2251 * This releases any locks held in the path starting at level and
2252 * going all the way up to the root.
2254 * btrfs_search_slot will keep the lock held on higher nodes in a few
2255 * corner cases, such as COW of the block at slot zero in the node. This
2256 * ignores those rules, and it should only be called when there are no
2257 * more updates to be done higher up in the tree.
2259 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2261 int i;
2263 if (path->keep_locks)
2264 return;
2266 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2267 if (!path->nodes[i])
2268 continue;
2269 if (!path->locks[i])
2270 continue;
2271 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2272 path->locks[i] = 0;
2277 * helper function for btrfs_search_slot. The goal is to find a block
2278 * in cache without setting the path to blocking. If we find the block
2279 * we return zero and the path is unchanged.
2281 * If we can't find the block, we set the path blocking and do some
2282 * reada. -EAGAIN is returned and the search must be repeated.
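 *
 * Callers retry the whole search on -EAGAIN, roughly (sketch only):
 *
 *	err = read_block_for_search(trans, root, p, &b, level, slot, key, 0);
 *	if (err == -EAGAIN)
 *		goto again;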
2284 static int
2285 read_block_for_search(struct btrfs_trans_handle *trans,
2286 struct btrfs_root *root, struct btrfs_path *p,
2287 struct extent_buffer **eb_ret, int level, int slot,
2288 struct btrfs_key *key, u64 time_seq)
2290 u64 blocknr;
2291 u64 gen;
2292 u32 blocksize;
2293 struct extent_buffer *b = *eb_ret;
2294 struct extent_buffer *tmp;
2295 int ret;
2297 blocknr = btrfs_node_blockptr(b, slot);
2298 gen = btrfs_node_ptr_generation(b, slot);
2299 blocksize = btrfs_level_size(root, level - 1);
2301 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2302 if (tmp) {
2303 /* first we do an atomic uptodate check */
2304 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2305 *eb_ret = tmp;
2306 return 0;
2309 /* the pages were up to date, but we failed
2310 * the generation number check. Do a full
2311 * read for the generation number that is correct.
2312 * We must do this without dropping locks so
2313 * we can trust our generation number
2315 btrfs_set_path_blocking(p);
2317 /* now we're allowed to do a blocking uptodate check */
2318 ret = btrfs_read_buffer(tmp, gen);
2319 if (!ret) {
2320 *eb_ret = tmp;
2321 return 0;
2323 free_extent_buffer(tmp);
2324 btrfs_release_path(p);
2325 return -EIO;
2329 * reduce lock contention at high levels
2330 * of the btree by dropping locks before
2331 * we read. Don't release the lock on the current
2332 * level because we need to walk this node to figure
2333 * out which blocks to read.
2335 btrfs_unlock_up_safe(p, level + 1);
2336 btrfs_set_path_blocking(p);
2338 free_extent_buffer(tmp);
2339 if (p->reada)
2340 reada_for_search(root, p, level, slot, key->objectid);
2342 btrfs_release_path(p);
2344 ret = -EAGAIN;
2345 tmp = read_tree_block(root, blocknr, blocksize, 0);
2346 if (tmp) {
2348 * If the read above didn't mark this buffer up to date,
2349 * it will never end up being up to date. Set ret to EIO now
2350 * and give up so that our caller doesn't loop forever
2351 * on our EAGAINs.
2353 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2354 ret = -EIO;
2355 free_extent_buffer(tmp);
2357 return ret;
2361 * helper function for btrfs_search_slot. This does all of the checks
2362 * for node-level blocks and does any balancing required based on
2363 * the ins_len.
2365 * If no extra work was required, zero is returned. If we had to
2366 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2367 * start over
2369 static int
2370 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2371 struct btrfs_root *root, struct btrfs_path *p,
2372 struct extent_buffer *b, int level, int ins_len,
2373 int *write_lock_level)
2375 int ret;
2376 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2377 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2378 int sret;
2380 if (*write_lock_level < level + 1) {
2381 *write_lock_level = level + 1;
2382 btrfs_release_path(p);
2383 goto again;
2386 btrfs_set_path_blocking(p);
2387 reada_for_balance(root, p, level);
2388 sret = split_node(trans, root, p, level);
2389 btrfs_clear_path_blocking(p, NULL, 0);
2391 BUG_ON(sret > 0);
2392 if (sret) {
2393 ret = sret;
2394 goto done;
2396 b = p->nodes[level];
2397 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2398 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2399 int sret;
2401 if (*write_lock_level < level + 1) {
2402 *write_lock_level = level + 1;
2403 btrfs_release_path(p);
2404 goto again;
2407 btrfs_set_path_blocking(p);
2408 reada_for_balance(root, p, level);
2409 sret = balance_level(trans, root, p, level);
2410 btrfs_clear_path_blocking(p, NULL, 0);
2412 if (sret) {
2413 ret = sret;
2414 goto done;
2416 b = p->nodes[level];
2417 if (!b) {
2418 btrfs_release_path(p);
2419 goto again;
2421 BUG_ON(btrfs_header_nritems(b) == 1);
2423 return 0;
2425 again:
2426 ret = -EAGAIN;
2427 done:
2428 return ret;
2431 static void key_search_validate(struct extent_buffer *b,
2432 struct btrfs_key *key,
2433 int level)
2435 #ifdef CONFIG_BTRFS_ASSERT
2436 struct btrfs_disk_key disk_key;
2438 btrfs_cpu_key_to_disk(&disk_key, key);
2440 if (level == 0)
2441 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2442 offsetof(struct btrfs_leaf, items[0].key),
2443 sizeof(disk_key)));
2444 else
2445 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2446 offsetof(struct btrfs_node, ptrs[0].key),
2447 sizeof(disk_key)));
2448 #endif
2451 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2452 int level, int *prev_cmp, int *slot)
2454 if (*prev_cmp != 0) {
2455 *prev_cmp = bin_search(b, key, level, slot);
2456 return *prev_cmp;
2459 key_search_validate(b, key, level);
2460 *slot = 0;
2462 return 0;
2466 * look for key in the tree. path is filled in with nodes along the way
2467 * if key is found, we return zero and you can find the item in the leaf
2468 * level of the path (level 0)
2470 * If the key isn't found, the path points to the slot where it should
2471 * be inserted, and 1 is returned. If there are other errors during the
2472 * search a negative error number is returned.
2474 * if ins_len > 0, nodes and leaves will be split as we walk down the
2475 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2476 * possible)
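 *
 * A minimal read-only lookup looks roughly like this (illustrative
 * sketch; 'key' is assumed to be filled in by the caller):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		the item is at path->nodes[0], slot path->slots[0]
 *	else if (ret == 1)
 *		the key is missing and the path points at the insert position
 *	btrfs_free_path(path);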
2478 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2479 *root, struct btrfs_key *key, struct btrfs_path *p, int
2480 ins_len, int cow)
2482 struct extent_buffer *b;
2483 int slot;
2484 int ret;
2485 int err;
2486 int level;
2487 int lowest_unlock = 1;
2488 int root_lock;
2489 /* everything at write_lock_level or lower must be write locked */
2490 int write_lock_level = 0;
2491 u8 lowest_level = 0;
2492 int min_write_lock_level;
2493 int prev_cmp;
2495 lowest_level = p->lowest_level;
2496 WARN_ON(lowest_level && ins_len > 0);
2497 WARN_ON(p->nodes[0] != NULL);
2499 if (ins_len < 0) {
2500 lowest_unlock = 2;
2502 /* when we are removing items, we might have to go up to level
2503 * two as we update tree pointers. Make sure we keep write
2504 * locks for those levels as well
2506 write_lock_level = 2;
2507 } else if (ins_len > 0) {
2509 * for inserting items, make sure we have a write lock on
2510 * level 1 so we can update keys
2512 write_lock_level = 1;
2515 if (!cow)
2516 write_lock_level = -1;
2518 if (cow && (p->keep_locks || p->lowest_level))
2519 write_lock_level = BTRFS_MAX_LEVEL;
2521 min_write_lock_level = write_lock_level;
2523 again:
2524 prev_cmp = -1;
2526 * we try very hard to do read locks on the root
2528 root_lock = BTRFS_READ_LOCK;
2529 level = 0;
2530 if (p->search_commit_root) {
2532 * the commit roots are read only
2533 * so we always do read locks
2535 b = root->commit_root;
2536 extent_buffer_get(b);
2537 level = btrfs_header_level(b);
2538 if (!p->skip_locking)
2539 btrfs_tree_read_lock(b);
2540 } else {
2541 if (p->skip_locking) {
2542 b = btrfs_root_node(root);
2543 level = btrfs_header_level(b);
2544 } else {
2545 /* we don't know the level of the root node
2546 * until we actually have it read locked
2548 b = btrfs_read_lock_root_node(root);
2549 level = btrfs_header_level(b);
2550 if (level <= write_lock_level) {
2551 /* whoops, must trade for write lock */
2552 btrfs_tree_read_unlock(b);
2553 free_extent_buffer(b);
2554 b = btrfs_lock_root_node(root);
2555 root_lock = BTRFS_WRITE_LOCK;
2557 /* the level might have changed, check again */
2558 level = btrfs_header_level(b);
2562 p->nodes[level] = b;
2563 if (!p->skip_locking)
2564 p->locks[level] = root_lock;
2566 while (b) {
2567 level = btrfs_header_level(b);
2570 * setup the path here so we can release it under lock
2571 * contention with the cow code
2573 if (cow) {
2575 * if we don't really need to cow this block
2576 * then we don't want to set the path blocking,
2577 * so we test it here
2579 if (!should_cow_block(trans, root, b))
2580 goto cow_done;
2582 btrfs_set_path_blocking(p);
2585 * must have write locks on this node and the
2586 * parent
2588 if (level > write_lock_level ||
2589 (level + 1 > write_lock_level &&
2590 level + 1 < BTRFS_MAX_LEVEL &&
2591 p->nodes[level + 1])) {
2592 write_lock_level = level + 1;
2593 btrfs_release_path(p);
2594 goto again;
2597 err = btrfs_cow_block(trans, root, b,
2598 p->nodes[level + 1],
2599 p->slots[level + 1], &b);
2600 if (err) {
2601 ret = err;
2602 goto done;
2605 cow_done:
2606 BUG_ON(!cow && ins_len);
2608 p->nodes[level] = b;
2609 btrfs_clear_path_blocking(p, NULL, 0);
2612 * we have a lock on b and as long as we aren't changing
2613 * the tree, there is no way for the items in b to change.
2614 * It is safe to drop the lock on our parent before we
2615 * go through the expensive btree search on b.
2617 * If cow is true, then we might be changing slot zero,
2618 * which may require changing the parent. So, we can't
2619 * drop the lock until after we know which slot we're
2620 * operating on.
2622 if (!cow)
2623 btrfs_unlock_up_safe(p, level + 1);
2625 ret = key_search(b, key, level, &prev_cmp, &slot);
2627 if (level != 0) {
2628 int dec = 0;
2629 if (ret && slot > 0) {
2630 dec = 1;
2631 slot -= 1;
2633 p->slots[level] = slot;
2634 err = setup_nodes_for_search(trans, root, p, b, level,
2635 ins_len, &write_lock_level);
2636 if (err == -EAGAIN)
2637 goto again;
2638 if (err) {
2639 ret = err;
2640 goto done;
2642 b = p->nodes[level];
2643 slot = p->slots[level];
2646 * slot 0 is special: if we change the key
2647 * we have to update the parent pointer
2648 * which means we must have a write lock
2649 * on the parent
2651 if (slot == 0 && cow &&
2652 write_lock_level < level + 1) {
2653 write_lock_level = level + 1;
2654 btrfs_release_path(p);
2655 goto again;
2658 unlock_up(p, level, lowest_unlock,
2659 min_write_lock_level, &write_lock_level);
2661 if (level == lowest_level) {
2662 if (dec)
2663 p->slots[level]++;
2664 goto done;
2667 err = read_block_for_search(trans, root, p,
2668 &b, level, slot, key, 0);
2669 if (err == -EAGAIN)
2670 goto again;
2671 if (err) {
2672 ret = err;
2673 goto done;
2676 if (!p->skip_locking) {
2677 level = btrfs_header_level(b);
2678 if (level <= write_lock_level) {
2679 err = btrfs_try_tree_write_lock(b);
2680 if (!err) {
2681 btrfs_set_path_blocking(p);
2682 btrfs_tree_lock(b);
2683 btrfs_clear_path_blocking(p, b,
2684 BTRFS_WRITE_LOCK);
2686 p->locks[level] = BTRFS_WRITE_LOCK;
2687 } else {
2688 err = btrfs_try_tree_read_lock(b);
2689 if (!err) {
2690 btrfs_set_path_blocking(p);
2691 btrfs_tree_read_lock(b);
2692 btrfs_clear_path_blocking(p, b,
2693 BTRFS_READ_LOCK);
2695 p->locks[level] = BTRFS_READ_LOCK;
2697 p->nodes[level] = b;
2699 } else {
2700 p->slots[level] = slot;
2701 if (ins_len > 0 &&
2702 btrfs_leaf_free_space(root, b) < ins_len) {
2703 if (write_lock_level < 1) {
2704 write_lock_level = 1;
2705 btrfs_release_path(p);
2706 goto again;
2709 btrfs_set_path_blocking(p);
2710 err = split_leaf(trans, root, key,
2711 p, ins_len, ret == 0);
2712 btrfs_clear_path_blocking(p, NULL, 0);
2714 BUG_ON(err > 0);
2715 if (err) {
2716 ret = err;
2717 goto done;
2720 if (!p->search_for_split)
2721 unlock_up(p, level, lowest_unlock,
2722 min_write_lock_level, &write_lock_level);
2723 goto done;
2726 ret = 1;
2727 done:
2729 * we don't really know what they plan on doing with the path
2730 * from here on, so for now just mark it as blocking
2732 if (!p->leave_spinning)
2733 btrfs_set_path_blocking(p);
2734 if (ret < 0)
2735 btrfs_release_path(p);
2736 return ret;
2740 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2741 * current state of the tree together with the operations recorded in the tree
2742 * modification log to search for the key in a previous version of this tree, as
2743 * denoted by the time_seq parameter.
2745 * Naturally, there is no support for insert, delete or cow operations.
2747 * The resulting path and return value will be set up as if we called
2748 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
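 *
 * time_seq is typically a sequence number obtained via
 * btrfs_get_tree_mod_seq() (see the tree mod log helpers earlier in this
 * file), so the logged state it refers to cannot go away mid-search.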
2750 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2751 struct btrfs_path *p, u64 time_seq)
2753 struct extent_buffer *b;
2754 int slot;
2755 int ret;
2756 int err;
2757 int level;
2758 int lowest_unlock = 1;
2759 u8 lowest_level = 0;
2760 int prev_cmp = -1;
2762 lowest_level = p->lowest_level;
2763 WARN_ON(p->nodes[0] != NULL);
2765 if (p->search_commit_root) {
2766 BUG_ON(time_seq);
2767 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2770 again:
2771 b = get_old_root(root, time_seq);
2772 level = btrfs_header_level(b);
2773 p->locks[level] = BTRFS_READ_LOCK;
2775 while (b) {
2776 level = btrfs_header_level(b);
2777 p->nodes[level] = b;
2778 btrfs_clear_path_blocking(p, NULL, 0);
2781 * we have a lock on b and as long as we aren't changing
2782 * the tree, there is no way for the items in b to change.
2783 * It is safe to drop the lock on our parent before we
2784 * go through the expensive btree search on b.
2786 btrfs_unlock_up_safe(p, level + 1);
2789 * Since we can unwind eb's we want to do a real search every
2790 * time.
2792 prev_cmp = -1;
2793 ret = key_search(b, key, level, &prev_cmp, &slot);
2795 if (level != 0) {
2796 int dec = 0;
2797 if (ret && slot > 0) {
2798 dec = 1;
2799 slot -= 1;
2801 p->slots[level] = slot;
2802 unlock_up(p, level, lowest_unlock, 0, NULL);
2804 if (level == lowest_level) {
2805 if (dec)
2806 p->slots[level]++;
2807 goto done;
2810 err = read_block_for_search(NULL, root, p, &b, level,
2811 slot, key, time_seq);
2812 if (err == -EAGAIN)
2813 goto again;
2814 if (err) {
2815 ret = err;
2816 goto done;
2819 level = btrfs_header_level(b);
2820 err = btrfs_try_tree_read_lock(b);
2821 if (!err) {
2822 btrfs_set_path_blocking(p);
2823 btrfs_tree_read_lock(b);
2824 btrfs_clear_path_blocking(p, b,
2825 BTRFS_READ_LOCK);
2827 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
2828 if (!b) {
2829 ret = -ENOMEM;
2830 goto done;
2832 p->locks[level] = BTRFS_READ_LOCK;
2833 p->nodes[level] = b;
2834 } else {
2835 p->slots[level] = slot;
2836 unlock_up(p, level, lowest_unlock, 0, NULL);
2837 goto done;
2840 ret = 1;
2841 done:
2842 if (!p->leave_spinning)
2843 btrfs_set_path_blocking(p);
2844 if (ret < 0)
2845 btrfs_release_path(p);
2847 return ret;
2851 * helper to use instead of search slot if no exact match is needed but
2852 * instead the next or previous item should be returned.
2853 * When find_higher is true, the next higher item is returned, the next lower
2854 * otherwise.
2855 * When return_any and find_higher are both true, and no higher item is found,
2856 * return the next lower instead.
2857 * When return_any is true and find_higher is false, and no lower item is found,
2858 * return the next higher instead.
2859 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2860 * < 0 on error
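 *
 * For example, find_higher == 1 together with return_any == 1 yields the
 * first item at or after *key, falling back to the last item in the tree
 * when nothing higher exists.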
2862 int btrfs_search_slot_for_read(struct btrfs_root *root,
2863 struct btrfs_key *key, struct btrfs_path *p,
2864 int find_higher, int return_any)
2866 int ret;
2867 struct extent_buffer *leaf;
2869 again:
2870 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2871 if (ret <= 0)
2872 return ret;
2874 * a return value of 1 means the path is at the position where the
2875 * item should be inserted. Normally this is the next bigger item,
2876 * but in case the previous item is the last in a leaf, path points
2877 * to the first free slot in the previous leaf, i.e. at an invalid
2878 * item.
2880 leaf = p->nodes[0];
2882 if (find_higher) {
2883 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2884 ret = btrfs_next_leaf(root, p);
2885 if (ret <= 0)
2886 return ret;
2887 if (!return_any)
2888 return 1;
2890 * no higher item found, return the next
2891 * lower instead
2893 return_any = 0;
2894 find_higher = 0;
2895 btrfs_release_path(p);
2896 goto again;
2898 } else {
2899 if (p->slots[0] == 0) {
2900 ret = btrfs_prev_leaf(root, p);
2901 if (ret < 0)
2902 return ret;
2903 if (!ret) {
2904 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2905 return 0;
2907 if (!return_any)
2908 return 1;
2910 * no lower item found, return the next
2911 * higher instead
2913 return_any = 0;
2914 find_higher = 1;
2915 btrfs_release_path(p);
2916 goto again;
2917 } else {
2918 --p->slots[0];
2921 return 0;
2925 * adjust the pointers going up the tree, starting at level
2926 * making sure the right key of each node points to 'key'.
2927 * This is used after shifting pointers to the left, so it stops
2928 * fixing up pointers when a given leaf/node is not in slot 0 of the
2929 * higher levels
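 *
 * For example, when the smallest key of a leaf changes, the pointer key in
 * the parent node is rewritten, and if that pointer is itself in slot 0,
 * the grandparent is updated as well, and so on up the tree.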
2932 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
2933 struct btrfs_disk_key *key, int level)
2935 int i;
2936 struct extent_buffer *t;
2938 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2939 int tslot = path->slots[i];
2940 if (!path->nodes[i])
2941 break;
2942 t = path->nodes[i];
2943 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2944 btrfs_set_node_key(t, key, tslot);
2945 btrfs_mark_buffer_dirty(path->nodes[i]);
2946 if (tslot != 0)
2947 break;
2952 * update item key.
2954 * This function isn't completely safe. It's the caller's responsibility
2955 * that the new key won't break the order
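 *
 * The BUG_ON()s below only check that the new key still sorts strictly
 * between its neighbours within this leaf; ordering relative to other
 * leaves remains the caller's responsibility.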
2957 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
2958 struct btrfs_key *new_key)
2960 struct btrfs_disk_key disk_key;
2961 struct extent_buffer *eb;
2962 int slot;
2964 eb = path->nodes[0];
2965 slot = path->slots[0];
2966 if (slot > 0) {
2967 btrfs_item_key(eb, &disk_key, slot - 1);
2968 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2970 if (slot < btrfs_header_nritems(eb) - 1) {
2971 btrfs_item_key(eb, &disk_key, slot + 1);
2972 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2975 btrfs_cpu_key_to_disk(&disk_key, new_key);
2976 btrfs_set_item_key(eb, &disk_key, slot);
2977 btrfs_mark_buffer_dirty(eb);
2978 if (slot == 0)
2979 fixup_low_keys(root, path, &disk_key, 1);
2983 * try to push data from one node into the next node left in the
2984 * tree.
2986 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2987 * error, and > 0 if there was no room in the left hand block.
2989 static int push_node_left(struct btrfs_trans_handle *trans,
2990 struct btrfs_root *root, struct extent_buffer *dst,
2991 struct extent_buffer *src, int empty)
2993 int push_items = 0;
2994 int src_nritems;
2995 int dst_nritems;
2996 int ret = 0;
2998 src_nritems = btrfs_header_nritems(src);
2999 dst_nritems = btrfs_header_nritems(dst);
3000 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3001 WARN_ON(btrfs_header_generation(src) != trans->transid);
3002 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3004 if (!empty && src_nritems <= 8)
3005 return 1;
3007 if (push_items <= 0)
3008 return 1;
3010 if (empty) {
3011 push_items = min(src_nritems, push_items);
3012 if (push_items < src_nritems) {
3013 /* leave at least 8 pointers in the node if
3014 * we aren't going to empty it
3016 if (src_nritems - push_items < 8) {
3017 if (push_items <= 8)
3018 return 1;
3019 push_items -= 8;
3022 } else
3023 push_items = min(src_nritems - 8, push_items);
3025 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3026 push_items);
3027 copy_extent_buffer(dst, src,
3028 btrfs_node_key_ptr_offset(dst_nritems),
3029 btrfs_node_key_ptr_offset(0),
3030 push_items * sizeof(struct btrfs_key_ptr));
3032 if (push_items < src_nritems) {
3034 * don't call tree_mod_log_eb_move here, key removal was already
3035 * fully logged by tree_mod_log_eb_copy above.
3037 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3038 btrfs_node_key_ptr_offset(push_items),
3039 (src_nritems - push_items) *
3040 sizeof(struct btrfs_key_ptr));
3042 btrfs_set_header_nritems(src, src_nritems - push_items);
3043 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3044 btrfs_mark_buffer_dirty(src);
3045 btrfs_mark_buffer_dirty(dst);
3047 return ret;
3051 * try to push data from one node into the next node right in the
3052 * tree.
3054 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3055 * error, and > 0 if there was no room in the right hand block.
3057 * this will only push up to 1/2 the contents of the left node over
3059 static int balance_node_right(struct btrfs_trans_handle *trans,
3060 struct btrfs_root *root,
3061 struct extent_buffer *dst,
3062 struct extent_buffer *src)
3064 int push_items = 0;
3065 int max_push;
3066 int src_nritems;
3067 int dst_nritems;
3068 int ret = 0;
3070 WARN_ON(btrfs_header_generation(src) != trans->transid);
3071 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3073 src_nritems = btrfs_header_nritems(src);
3074 dst_nritems = btrfs_header_nritems(dst);
3075 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3076 if (push_items <= 0)
3077 return 1;
3079 if (src_nritems < 4)
3080 return 1;
3082 max_push = src_nritems / 2 + 1;
3083 /* don't try to empty the node */
3084 if (max_push >= src_nritems)
3085 return 1;
3087 if (max_push < push_items)
3088 push_items = max_push;
3090 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3091 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3092 btrfs_node_key_ptr_offset(0),
3093 (dst_nritems) *
3094 sizeof(struct btrfs_key_ptr));
3096 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3097 src_nritems - push_items, push_items);
3098 copy_extent_buffer(dst, src,
3099 btrfs_node_key_ptr_offset(0),
3100 btrfs_node_key_ptr_offset(src_nritems - push_items),
3101 push_items * sizeof(struct btrfs_key_ptr));
3103 btrfs_set_header_nritems(src, src_nritems - push_items);
3104 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3106 btrfs_mark_buffer_dirty(src);
3107 btrfs_mark_buffer_dirty(dst);
3109 return ret;
3113 * helper function to insert a new root level in the tree.
3114 * A new node is allocated, and a single item is inserted to
3115 * point to the existing root
3117 * returns zero on success or < 0 on failure.
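 *
 * In effect this grows the tree by one level: the new root holds a single
 * key/pointer pair that references the old root block.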
3119 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3120 struct btrfs_root *root,
3121 struct btrfs_path *path, int level)
3123 u64 lower_gen;
3124 struct extent_buffer *lower;
3125 struct extent_buffer *c;
3126 struct extent_buffer *old;
3127 struct btrfs_disk_key lower_key;
3129 BUG_ON(path->nodes[level]);
3130 BUG_ON(path->nodes[level-1] != root->node);
3132 lower = path->nodes[level-1];
3133 if (level == 1)
3134 btrfs_item_key(lower, &lower_key, 0);
3135 else
3136 btrfs_node_key(lower, &lower_key, 0);
3138 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3139 root->root_key.objectid, &lower_key,
3140 level, root->node->start, 0);
3141 if (IS_ERR(c))
3142 return PTR_ERR(c);
3144 root_add_used(root, root->nodesize);
3146 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3147 btrfs_set_header_nritems(c, 1);
3148 btrfs_set_header_level(c, level);
3149 btrfs_set_header_bytenr(c, c->start);
3150 btrfs_set_header_generation(c, trans->transid);
3151 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3152 btrfs_set_header_owner(c, root->root_key.objectid);
3154 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3155 BTRFS_FSID_SIZE);
3157 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3158 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3160 btrfs_set_node_key(c, &lower_key, 0);
3161 btrfs_set_node_blockptr(c, 0, lower->start);
3162 lower_gen = btrfs_header_generation(lower);
3163 WARN_ON(lower_gen != trans->transid);
3165 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3167 btrfs_mark_buffer_dirty(c);
3169 old = root->node;
3170 tree_mod_log_set_root_pointer(root, c, 0);
3171 rcu_assign_pointer(root->node, c);
3173 /* the super has an extra ref to root->node */
3174 free_extent_buffer(old);
3176 add_root_to_dirty_list(root);
3177 extent_buffer_get(c);
3178 path->nodes[level] = c;
3179 path->locks[level] = BTRFS_WRITE_LOCK;
3180 path->slots[level] = 0;
3181 return 0;
3185 * worker function to insert a single pointer in a node.
3186 * the node should have enough room for the pointer already
3188 * slot and level indicate where you want the key to go, and
3189 * bytenr is the block the key points to.
3191 static void insert_ptr(struct btrfs_trans_handle *trans,
3192 struct btrfs_root *root, struct btrfs_path *path,
3193 struct btrfs_disk_key *key, u64 bytenr,
3194 int slot, int level)
3196 struct extent_buffer *lower;
3197 int nritems;
3198 int ret;
3200 BUG_ON(!path->nodes[level]);
3201 btrfs_assert_tree_locked(path->nodes[level]);
3202 lower = path->nodes[level];
3203 nritems = btrfs_header_nritems(lower);
3204 BUG_ON(slot > nritems);
3205 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3206 if (slot != nritems) {
3207 if (level)
3208 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3209 slot, nritems - slot);
3210 memmove_extent_buffer(lower,
3211 btrfs_node_key_ptr_offset(slot + 1),
3212 btrfs_node_key_ptr_offset(slot),
3213 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3215 if (level) {
3216 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3217 MOD_LOG_KEY_ADD, GFP_NOFS);
3218 BUG_ON(ret < 0);
3220 btrfs_set_node_key(lower, key, slot);
3221 btrfs_set_node_blockptr(lower, slot, bytenr);
3222 WARN_ON(trans->transid == 0);
3223 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3224 btrfs_set_header_nritems(lower, nritems + 1);
3225 btrfs_mark_buffer_dirty(lower);
3229 * split the node at the specified level in path in two.
3230 * The path is corrected to point to the appropriate node after the split
3232 * Before splitting this tries to make some room in the node by pushing
3233 * left and right, if either one works, it returns right away.
3235 * returns 0 on success and < 0 on failure
3237 static noinline int split_node(struct btrfs_trans_handle *trans,
3238 struct btrfs_root *root,
3239 struct btrfs_path *path, int level)
3241 struct extent_buffer *c;
3242 struct extent_buffer *split;
3243 struct btrfs_disk_key disk_key;
3244 int mid;
3245 int ret;
3246 u32 c_nritems;
3248 c = path->nodes[level];
3249 WARN_ON(btrfs_header_generation(c) != trans->transid);
3250 if (c == root->node) {
3252 * trying to split the root, let's make a new one
3254 * tree mod log: We don't log removal of the old root in
3255 * insert_new_root, because that root buffer will be kept as a
3256 * normal node. We are going to log removal of half of the
3257 * elements below with tree_mod_log_eb_copy. We're holding a
3258 * tree lock on the buffer, which is why we cannot race with
3259 * other tree_mod_log users.
3261 ret = insert_new_root(trans, root, path, level + 1);
3262 if (ret)
3263 return ret;
3264 } else {
3265 ret = push_nodes_for_insert(trans, root, path, level);
3266 c = path->nodes[level];
3267 if (!ret && btrfs_header_nritems(c) <
3268 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3269 return 0;
3270 if (ret < 0)
3271 return ret;
3274 c_nritems = btrfs_header_nritems(c);
3275 mid = (c_nritems + 1) / 2;
3276 btrfs_node_key(c, &disk_key, mid);
3278 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3279 root->root_key.objectid,
3280 &disk_key, level, c->start, 0);
3281 if (IS_ERR(split))
3282 return PTR_ERR(split);
3284 root_add_used(root, root->nodesize);
3286 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3287 btrfs_set_header_level(split, btrfs_header_level(c));
3288 btrfs_set_header_bytenr(split, split->start);
3289 btrfs_set_header_generation(split, trans->transid);
3290 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3291 btrfs_set_header_owner(split, root->root_key.objectid);
3292 write_extent_buffer(split, root->fs_info->fsid,
3293 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3294 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3295 btrfs_header_chunk_tree_uuid(split),
3296 BTRFS_UUID_SIZE);
3298 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3299 copy_extent_buffer(split, c,
3300 btrfs_node_key_ptr_offset(0),
3301 btrfs_node_key_ptr_offset(mid),
3302 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3303 btrfs_set_header_nritems(split, c_nritems - mid);
3304 btrfs_set_header_nritems(c, mid);
3305 ret = 0;
3307 btrfs_mark_buffer_dirty(c);
3308 btrfs_mark_buffer_dirty(split);
3310 insert_ptr(trans, root, path, &disk_key, split->start,
3311 path->slots[level + 1] + 1, level + 1);
3313 if (path->slots[level] >= mid) {
3314 path->slots[level] -= mid;
3315 btrfs_tree_unlock(c);
3316 free_extent_buffer(c);
3317 path->nodes[level] = split;
3318 path->slots[level + 1] += 1;
3319 } else {
3320 btrfs_tree_unlock(split);
3321 free_extent_buffer(split);
3323 return ret;
3327 * how many bytes are required to store the items in a leaf. start
3328 * and nr indicate which items in the leaf to check. This totals up the
3329 * space used both by the item structs and the item data
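 *
 * For nr items this works out to (sketch):
 *
 *	sum of the items' data sizes + nr * sizeof(struct btrfs_item)
 *
 * computed below from the offset/size of the first and last item.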
3331 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3333 struct btrfs_item *start_item;
3334 struct btrfs_item *end_item;
3335 struct btrfs_map_token token;
3336 int data_len;
3337 int nritems = btrfs_header_nritems(l);
3338 int end = min(nritems, start + nr) - 1;
3340 if (!nr)
3341 return 0;
3342 btrfs_init_map_token(&token);
3343 start_item = btrfs_item_nr(start);
3344 end_item = btrfs_item_nr(end);
3345 data_len = btrfs_token_item_offset(l, start_item, &token) +
3346 btrfs_token_item_size(l, start_item, &token);
3347 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3348 data_len += sizeof(struct btrfs_item) * nr;
3349 WARN_ON(data_len < 0);
3350 return data_len;
3354 * The space between the end of the leaf items and
3355 * the start of the leaf data. IOW, how much room
3356 * the leaf has left for both items and data
3358 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3359 struct extent_buffer *leaf)
3361 int nritems = btrfs_header_nritems(leaf);
3362 int ret;
3363 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3364 if (ret < 0) {
3365 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3366 "used %d nritems %d\n",
3367 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3368 leaf_space_used(leaf, 0, nritems), nritems);
3370 return ret;
3374 * min slot controls the lowest index we're willing to push to the
3375 * right. We'll push up to and including min_slot, but no lower
3377 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3378 struct btrfs_root *root,
3379 struct btrfs_path *path,
3380 int data_size, int empty,
3381 struct extent_buffer *right,
3382 int free_space, u32 left_nritems,
3383 u32 min_slot)
3385 struct extent_buffer *left = path->nodes[0];
3386 struct extent_buffer *upper = path->nodes[1];
3387 struct btrfs_map_token token;
3388 struct btrfs_disk_key disk_key;
3389 int slot;
3390 u32 i;
3391 int push_space = 0;
3392 int push_items = 0;
3393 struct btrfs_item *item;
3394 u32 nr;
3395 u32 right_nritems;
3396 u32 data_end;
3397 u32 this_item_size;
3399 btrfs_init_map_token(&token);
3401 if (empty)
3402 nr = 0;
3403 else
3404 nr = max_t(u32, 1, min_slot);
3406 if (path->slots[0] >= left_nritems)
3407 push_space += data_size;
3409 slot = path->slots[1];
3410 i = left_nritems - 1;
3411 while (i >= nr) {
3412 item = btrfs_item_nr(i);
3414 if (!empty && push_items > 0) {
3415 if (path->slots[0] > i)
3416 break;
3417 if (path->slots[0] == i) {
3418 int space = btrfs_leaf_free_space(root, left);
3419 if (space + push_space * 2 > free_space)
3420 break;
3424 if (path->slots[0] == i)
3425 push_space += data_size;
3427 this_item_size = btrfs_item_size(left, item);
3428 if (this_item_size + sizeof(*item) + push_space > free_space)
3429 break;
3431 push_items++;
3432 push_space += this_item_size + sizeof(*item);
3433 if (i == 0)
3434 break;
3435 i--;
3438 if (push_items == 0)
3439 goto out_unlock;
3441 WARN_ON(!empty && push_items == left_nritems);
3443 /* push left to right */
3444 right_nritems = btrfs_header_nritems(right);
3446 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3447 push_space -= leaf_data_end(root, left);
3449 /* make room in the right data area */
3450 data_end = leaf_data_end(root, right);
3451 memmove_extent_buffer(right,
3452 btrfs_leaf_data(right) + data_end - push_space,
3453 btrfs_leaf_data(right) + data_end,
3454 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3456 /* copy from the left data area */
3457 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3458 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3459 btrfs_leaf_data(left) + leaf_data_end(root, left),
3460 push_space);
3462 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3463 btrfs_item_nr_offset(0),
3464 right_nritems * sizeof(struct btrfs_item));
3466 /* copy the items from left to right */
3467 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3468 btrfs_item_nr_offset(left_nritems - push_items),
3469 push_items * sizeof(struct btrfs_item));
3471 /* update the item pointers */
3472 right_nritems += push_items;
3473 btrfs_set_header_nritems(right, right_nritems);
3474 push_space = BTRFS_LEAF_DATA_SIZE(root);
3475 for (i = 0; i < right_nritems; i++) {
3476 item = btrfs_item_nr(i);
3477 push_space -= btrfs_token_item_size(right, item, &token);
3478 btrfs_set_token_item_offset(right, item, push_space, &token);
3481 left_nritems -= push_items;
3482 btrfs_set_header_nritems(left, left_nritems);
3484 if (left_nritems)
3485 btrfs_mark_buffer_dirty(left);
3486 else
3487 clean_tree_block(trans, root, left);
3489 btrfs_mark_buffer_dirty(right);
3491 btrfs_item_key(right, &disk_key, 0);
3492 btrfs_set_node_key(upper, &disk_key, slot + 1);
3493 btrfs_mark_buffer_dirty(upper);
3495 /* then fixup the leaf pointer in the path */
3496 if (path->slots[0] >= left_nritems) {
3497 path->slots[0] -= left_nritems;
3498 if (btrfs_header_nritems(path->nodes[0]) == 0)
3499 clean_tree_block(trans, root, path->nodes[0]);
3500 btrfs_tree_unlock(path->nodes[0]);
3501 free_extent_buffer(path->nodes[0]);
3502 path->nodes[0] = right;
3503 path->slots[1] += 1;
3504 } else {
3505 btrfs_tree_unlock(right);
3506 free_extent_buffer(right);
3508 return 0;
3510 out_unlock:
3511 btrfs_tree_unlock(right);
3512 free_extent_buffer(right);
3513 return 1;
3517 * push some data in the path leaf to the right, trying to free up at
3518 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3520 * returns 1 if the push failed because the other node didn't have enough
3521 * room, 0 if everything worked out and < 0 if there were major errors.
3523 * this will push starting from min_slot to the end of the leaf. It won't
3524 * push any slot lower than min_slot
3526 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3527 *root, struct btrfs_path *path,
3528 int min_data_size, int data_size,
3529 int empty, u32 min_slot)
3531 struct extent_buffer *left = path->nodes[0];
3532 struct extent_buffer *right;
3533 struct extent_buffer *upper;
3534 int slot;
3535 int free_space;
3536 u32 left_nritems;
3537 int ret;
3539 if (!path->nodes[1])
3540 return 1;
3542 slot = path->slots[1];
3543 upper = path->nodes[1];
3544 if (slot >= btrfs_header_nritems(upper) - 1)
3545 return 1;
3547 btrfs_assert_tree_locked(path->nodes[1]);
3549 right = read_node_slot(root, upper, slot + 1);
3550 if (right == NULL)
3551 return 1;
3553 btrfs_tree_lock(right);
3554 btrfs_set_lock_blocking(right);
3556 free_space = btrfs_leaf_free_space(root, right);
3557 if (free_space < data_size)
3558 goto out_unlock;
3560 /* cow and double check */
3561 ret = btrfs_cow_block(trans, root, right, upper,
3562 slot + 1, &right);
3563 if (ret)
3564 goto out_unlock;
3566 free_space = btrfs_leaf_free_space(root, right);
3567 if (free_space < data_size)
3568 goto out_unlock;
3570 left_nritems = btrfs_header_nritems(left);
3571 if (left_nritems == 0)
3572 goto out_unlock;
3574 return __push_leaf_right(trans, root, path, min_data_size, empty,
3575 right, free_space, left_nritems, min_slot);
3576 out_unlock:
3577 btrfs_tree_unlock(right);
3578 free_extent_buffer(right);
3579 return 1;
3583 * push some data in the path leaf to the left, trying to free up at
3584 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3586 * max_slot can put a limit on how far into the leaf we'll push items. The
3587 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3588 * items
3590 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3591 struct btrfs_root *root,
3592 struct btrfs_path *path, int data_size,
3593 int empty, struct extent_buffer *left,
3594 int free_space, u32 right_nritems,
3595 u32 max_slot)
3597 struct btrfs_disk_key disk_key;
3598 struct extent_buffer *right = path->nodes[0];
3599 int i;
3600 int push_space = 0;
3601 int push_items = 0;
3602 struct btrfs_item *item;
3603 u32 old_left_nritems;
3604 u32 nr;
3605 int ret = 0;
3606 u32 this_item_size;
3607 u32 old_left_item_size;
3608 struct btrfs_map_token token;
3610 btrfs_init_map_token(&token);
3612 if (empty)
3613 nr = min(right_nritems, max_slot);
3614 else
3615 nr = min(right_nritems - 1, max_slot);
3617 for (i = 0; i < nr; i++) {
3618 item = btrfs_item_nr(i);
3620 if (!empty && push_items > 0) {
3621 if (path->slots[0] < i)
3622 break;
3623 if (path->slots[0] == i) {
3624 int space = btrfs_leaf_free_space(root, right);
3625 if (space + push_space * 2 > free_space)
3626 break;
3630 if (path->slots[0] == i)
3631 push_space += data_size;
3633 this_item_size = btrfs_item_size(right, item);
3634 if (this_item_size + sizeof(*item) + push_space > free_space)
3635 break;
3637 push_items++;
3638 push_space += this_item_size + sizeof(*item);
3641 if (push_items == 0) {
3642 ret = 1;
3643 goto out;
3645 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3647 /* push data from right to left */
3648 copy_extent_buffer(left, right,
3649 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3650 btrfs_item_nr_offset(0),
3651 push_items * sizeof(struct btrfs_item));
3653 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3654 btrfs_item_offset_nr(right, push_items - 1);
3656 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3657 leaf_data_end(root, left) - push_space,
3658 btrfs_leaf_data(right) +
3659 btrfs_item_offset_nr(right, push_items - 1),
3660 push_space);
3661 old_left_nritems = btrfs_header_nritems(left);
3662 BUG_ON(old_left_nritems <= 0);
3664 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3665 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3666 u32 ioff;
3668 item = btrfs_item_nr(i);
3670 ioff = btrfs_token_item_offset(left, item, &token);
3671 btrfs_set_token_item_offset(left, item,
3672 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3673 &token);
3675 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3677 /* fixup right node */
3678 if (push_items > right_nritems)
3679 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3680 right_nritems);
3682 if (push_items < right_nritems) {
3683 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3684 leaf_data_end(root, right);
3685 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3686 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3687 btrfs_leaf_data(right) +
3688 leaf_data_end(root, right), push_space);
3690 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3691 btrfs_item_nr_offset(push_items),
3692 (btrfs_header_nritems(right) - push_items) *
3693 sizeof(struct btrfs_item));
3695 right_nritems -= push_items;
3696 btrfs_set_header_nritems(right, right_nritems);
3697 push_space = BTRFS_LEAF_DATA_SIZE(root);
3698 for (i = 0; i < right_nritems; i++) {
3699 item = btrfs_item_nr(i);
3701 push_space = push_space - btrfs_token_item_size(right,
3702 item, &token);
3703 btrfs_set_token_item_offset(right, item, push_space, &token);
3706 btrfs_mark_buffer_dirty(left);
3707 if (right_nritems)
3708 btrfs_mark_buffer_dirty(right);
3709 else
3710 clean_tree_block(trans, root, right);
3712 btrfs_item_key(right, &disk_key, 0);
3713 fixup_low_keys(root, path, &disk_key, 1);
3715 /* then fixup the leaf pointer in the path */
3716 if (path->slots[0] < push_items) {
3717 path->slots[0] += old_left_nritems;
3718 btrfs_tree_unlock(path->nodes[0]);
3719 free_extent_buffer(path->nodes[0]);
3720 path->nodes[0] = left;
3721 path->slots[1] -= 1;
3722 } else {
3723 btrfs_tree_unlock(left);
3724 free_extent_buffer(left);
3725 path->slots[0] -= push_items;
3727 BUG_ON(path->slots[0] < 0);
3728 return ret;
3729 out:
3730 btrfs_tree_unlock(left);
3731 free_extent_buffer(left);
3732 return ret;
3736 * push some data in the path leaf to the left, trying to free up at
3737 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3739 * max_slot can put a limit on how far into the leaf we'll push items. The
3740 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3741 * items
3743 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3744 *root, struct btrfs_path *path, int min_data_size,
3745 int data_size, int empty, u32 max_slot)
3747 struct extent_buffer *right = path->nodes[0];
3748 struct extent_buffer *left;
3749 int slot;
3750 int free_space;
3751 u32 right_nritems;
3752 int ret = 0;
3754 slot = path->slots[1];
3755 if (slot == 0)
3756 return 1;
3757 if (!path->nodes[1])
3758 return 1;
3760 right_nritems = btrfs_header_nritems(right);
3761 if (right_nritems == 0)
3762 return 1;
3764 btrfs_assert_tree_locked(path->nodes[1]);
3766 left = read_node_slot(root, path->nodes[1], slot - 1);
3767 if (left == NULL)
3768 return 1;
3770 btrfs_tree_lock(left);
3771 btrfs_set_lock_blocking(left);
3773 free_space = btrfs_leaf_free_space(root, left);
3774 if (free_space < data_size) {
3775 ret = 1;
3776 goto out;
3779 /* cow and double check */
3780 ret = btrfs_cow_block(trans, root, left,
3781 path->nodes[1], slot - 1, &left);
3782 if (ret) {
3783 /* we hit -ENOSPC, but it isn't fatal here */
3784 if (ret == -ENOSPC)
3785 ret = 1;
3786 goto out;
3789 free_space = btrfs_leaf_free_space(root, left);
3790 if (free_space < data_size) {
3791 ret = 1;
3792 goto out;
3795 return __push_leaf_left(trans, root, path, min_data_size,
3796 empty, left, free_space, right_nritems,
3797 max_slot);
3798 out:
3799 btrfs_tree_unlock(left);
3800 free_extent_buffer(left);
3801 return ret;
3805 * split the path's leaf in two, making sure there is at least data_size
3806 * available for the resulting leaf level of the path.
3808 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3809 struct btrfs_root *root,
3810 struct btrfs_path *path,
3811 struct extent_buffer *l,
3812 struct extent_buffer *right,
3813 int slot, int mid, int nritems)
3815 int data_copy_size;
3816 int rt_data_off;
3817 int i;
3818 struct btrfs_disk_key disk_key;
3819 struct btrfs_map_token token;
3821 btrfs_init_map_token(&token);
3823 nritems = nritems - mid;
3824 btrfs_set_header_nritems(right, nritems);
3825 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3827 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3828 btrfs_item_nr_offset(mid),
3829 nritems * sizeof(struct btrfs_item));
3831 copy_extent_buffer(right, l,
3832 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3833 data_copy_size, btrfs_leaf_data(l) +
3834 leaf_data_end(root, l), data_copy_size);
3836 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3837 btrfs_item_end_nr(l, mid);
3839 for (i = 0; i < nritems; i++) {
3840 struct btrfs_item *item = btrfs_item_nr(i);
3841 u32 ioff;
3843 ioff = btrfs_token_item_offset(right, item, &token);
3844 btrfs_set_token_item_offset(right, item,
3845 ioff + rt_data_off, &token);
3848 btrfs_set_header_nritems(l, mid);
3849 btrfs_item_key(right, &disk_key, 0);
3850 insert_ptr(trans, root, path, &disk_key, right->start,
3851 path->slots[1] + 1, 1);
3853 btrfs_mark_buffer_dirty(right);
3854 btrfs_mark_buffer_dirty(l);
3855 BUG_ON(path->slots[0] != slot);
3857 if (mid <= slot) {
3858 btrfs_tree_unlock(path->nodes[0]);
3859 free_extent_buffer(path->nodes[0]);
3860 path->nodes[0] = right;
3861 path->slots[0] -= mid;
3862 path->slots[1] += 1;
3863 } else {
3864 btrfs_tree_unlock(right);
3865 free_extent_buffer(right);
3868 BUG_ON(path->slots[0] < 0);
3872 * double splits happen when we need to insert a big item in the middle
3873 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3874 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3875 *          A                B                 C
3877 * We avoid this by trying to push the items on either side of our target
3878 * into the adjacent leaves. If all goes well we can avoid the double split
3879 * completely.
3881 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3882 struct btrfs_root *root,
3883 struct btrfs_path *path,
3884 int data_size)
3886 int ret;
3887 int progress = 0;
3888 int slot;
3889 u32 nritems;
3891 slot = path->slots[0];
3894 * try to push all the items after our slot into the
3895 * right leaf
3897 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3898 if (ret < 0)
3899 return ret;
3901 if (ret == 0)
3902 progress++;
3904 nritems = btrfs_header_nritems(path->nodes[0]);
3906 * our goal is to get our slot at the start or end of a leaf. If
3907 * we've done so we're done
3909 if (path->slots[0] == 0 || path->slots[0] == nritems)
3910 return 0;
3912 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3913 return 0;
3915 /* try to push all the items before our slot into the left leaf */
3916 slot = path->slots[0];
3917 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3918 if (ret < 0)
3919 return ret;
3921 if (ret == 0)
3922 progress++;
3924 if (progress)
3925 return 0;
3926 return 1;
3930 * split the path's leaf in two, making sure there is at least data_size
3931 * available for the resulting leaf level of the path.
3933 * returns 0 if all went well and < 0 on failure.
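 *
 * The 'split' variable below picks the strategy: 1 splits the leaf at
 * 'mid', 0 creates an empty right leaf and points the path at it so the
 * caller's new item becomes its first entry, and 2 means a second split
 * is needed after this one (the double split case).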
3935 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3936 struct btrfs_root *root,
3937 struct btrfs_key *ins_key,
3938 struct btrfs_path *path, int data_size,
3939 int extend)
3941 struct btrfs_disk_key disk_key;
3942 struct extent_buffer *l;
3943 u32 nritems;
3944 int mid;
3945 int slot;
3946 struct extent_buffer *right;
3947 int ret = 0;
3948 int wret;
3949 int split;
3950 int num_doubles = 0;
3951 int tried_avoid_double = 0;
3953 l = path->nodes[0];
3954 slot = path->slots[0];
3955 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3956 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3957 return -EOVERFLOW;
3959 /* first try to make some room by pushing left and right */
3960 if (data_size && path->nodes[1]) {
3961 wret = push_leaf_right(trans, root, path, data_size,
3962 data_size, 0, 0);
3963 if (wret < 0)
3964 return wret;
3965 if (wret) {
3966 wret = push_leaf_left(trans, root, path, data_size,
3967 data_size, 0, (u32)-1);
3968 if (wret < 0)
3969 return wret;
3971 l = path->nodes[0];
3973 /* did the pushes work? */
3974 if (btrfs_leaf_free_space(root, l) >= data_size)
3975 return 0;
3978 if (!path->nodes[1]) {
3979 ret = insert_new_root(trans, root, path, 1);
3980 if (ret)
3981 return ret;
3983 again:
3984 split = 1;
3985 l = path->nodes[0];
3986 slot = path->slots[0];
3987 nritems = btrfs_header_nritems(l);
3988 mid = (nritems + 1) / 2;
3990 if (mid <= slot) {
3991 if (nritems == 1 ||
3992 leaf_space_used(l, mid, nritems - mid) + data_size >
3993 BTRFS_LEAF_DATA_SIZE(root)) {
3994 if (slot >= nritems) {
3995 split = 0;
3996 } else {
3997 mid = slot;
3998 if (mid != nritems &&
3999 leaf_space_used(l, mid, nritems - mid) +
4000 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4001 if (data_size && !tried_avoid_double)
4002 goto push_for_double;
4003 split = 2;
4007 } else {
4008 if (leaf_space_used(l, 0, mid) + data_size >
4009 BTRFS_LEAF_DATA_SIZE(root)) {
4010 if (!extend && data_size && slot == 0) {
4011 split = 0;
4012 } else if ((extend || !data_size) && slot == 0) {
4013 mid = 1;
4014 } else {
4015 mid = slot;
4016 if (mid != nritems &&
4017 leaf_space_used(l, mid, nritems - mid) +
4018 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4019 if (data_size && !tried_avoid_double)
4020 goto push_for_double;
4021 split = 2;
4027 if (split == 0)
4028 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4029 else
4030 btrfs_item_key(l, &disk_key, mid);
4032 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4033 root->root_key.objectid,
4034 &disk_key, 0, l->start, 0);
4035 if (IS_ERR(right))
4036 return PTR_ERR(right);
4038 root_add_used(root, root->leafsize);
4040 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4041 btrfs_set_header_bytenr(right, right->start);
4042 btrfs_set_header_generation(right, trans->transid);
4043 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4044 btrfs_set_header_owner(right, root->root_key.objectid);
4045 btrfs_set_header_level(right, 0);
4046 write_extent_buffer(right, root->fs_info->fsid,
4047 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4049 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4050 btrfs_header_chunk_tree_uuid(right),
4051 BTRFS_UUID_SIZE);
4053 if (split == 0) {
4054 if (mid <= slot) {
4055 btrfs_set_header_nritems(right, 0);
4056 insert_ptr(trans, root, path, &disk_key, right->start,
4057 path->slots[1] + 1, 1);
4058 btrfs_tree_unlock(path->nodes[0]);
4059 free_extent_buffer(path->nodes[0]);
4060 path->nodes[0] = right;
4061 path->slots[0] = 0;
4062 path->slots[1] += 1;
4063 } else {
4064 btrfs_set_header_nritems(right, 0);
4065 insert_ptr(trans, root, path, &disk_key, right->start,
4066 path->slots[1], 1);
4067 btrfs_tree_unlock(path->nodes[0]);
4068 free_extent_buffer(path->nodes[0]);
4069 path->nodes[0] = right;
4070 path->slots[0] = 0;
4071 if (path->slots[1] == 0)
4072 fixup_low_keys(root, path, &disk_key, 1);
4074 btrfs_mark_buffer_dirty(right);
4075 return ret;
4078 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4080 if (split == 2) {
4081 BUG_ON(num_doubles != 0);
4082 num_doubles++;
4083 goto again;
4086 return 0;
4088 push_for_double:
4089 push_for_double_split(trans, root, path, data_size);
4090 tried_avoid_double = 1;
4091 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4092 return 0;
4093 goto again;
4096 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4097 struct btrfs_root *root,
4098 struct btrfs_path *path, int ins_len)
4100 struct btrfs_key key;
4101 struct extent_buffer *leaf;
4102 struct btrfs_file_extent_item *fi;
4103 u64 extent_len = 0;
4104 u32 item_size;
4105 int ret;
4107 leaf = path->nodes[0];
4108 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4110 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4111 key.type != BTRFS_EXTENT_CSUM_KEY);
4113 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4114 return 0;
4116 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4117 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4118 fi = btrfs_item_ptr(leaf, path->slots[0],
4119 struct btrfs_file_extent_item);
4120 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4122 btrfs_release_path(path);
4124 path->keep_locks = 1;
4125 path->search_for_split = 1;
4126 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4127 path->search_for_split = 0;
4128 if (ret < 0)
4129 goto err;
4131 ret = -EAGAIN;
4132 leaf = path->nodes[0];
4133 /* if our item isn't there or got smaller, return now */
4134 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4135 goto err;
4137 /* the leaf has changed, it now has room. return now */
4138 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4139 goto err;
4141 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4142 fi = btrfs_item_ptr(leaf, path->slots[0],
4143 struct btrfs_file_extent_item);
4144 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4145 goto err;
4148 btrfs_set_path_blocking(path);
4149 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4150 if (ret)
4151 goto err;
4153 path->keep_locks = 0;
4154 btrfs_unlock_up_safe(path, 1);
4155 return 0;
4156 err:
4157 path->keep_locks = 0;
4158 return ret;
4161 static noinline int split_item(struct btrfs_trans_handle *trans,
4162 struct btrfs_root *root,
4163 struct btrfs_path *path,
4164 struct btrfs_key *new_key,
4165 unsigned long split_offset)
4167 struct extent_buffer *leaf;
4168 struct btrfs_item *item;
4169 struct btrfs_item *new_item;
4170 int slot;
4171 char *buf;
4172 u32 nritems;
4173 u32 item_size;
4174 u32 orig_offset;
4175 struct btrfs_disk_key disk_key;
4177 leaf = path->nodes[0];
4178 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4180 btrfs_set_path_blocking(path);
4182 item = btrfs_item_nr(path->slots[0]);
4183 orig_offset = btrfs_item_offset(leaf, item);
4184 item_size = btrfs_item_size(leaf, item);
4186 buf = kmalloc(item_size, GFP_NOFS);
4187 if (!buf)
4188 return -ENOMEM;
4190 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4191 path->slots[0]), item_size);
4193 slot = path->slots[0] + 1;
4194 nritems = btrfs_header_nritems(leaf);
4195 if (slot != nritems) {
4196 /* shift the items */
4197 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4198 btrfs_item_nr_offset(slot),
4199 (nritems - slot) * sizeof(struct btrfs_item));
4202 btrfs_cpu_key_to_disk(&disk_key, new_key);
4203 btrfs_set_item_key(leaf, &disk_key, slot);
4205 new_item = btrfs_item_nr(slot);
4207 btrfs_set_item_offset(leaf, new_item, orig_offset);
4208 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4210 btrfs_set_item_offset(leaf, item,
4211 orig_offset + item_size - split_offset);
4212 btrfs_set_item_size(leaf, item, split_offset);
4214 btrfs_set_header_nritems(leaf, nritems + 1);
4216 /* write the data for the start of the original item */
4217 write_extent_buffer(leaf, buf,
4218 btrfs_item_ptr_offset(leaf, path->slots[0]),
4219 split_offset);
4221 /* write the data for the new item */
4222 write_extent_buffer(leaf, buf + split_offset,
4223 btrfs_item_ptr_offset(leaf, slot),
4224 item_size - split_offset);
4225 btrfs_mark_buffer_dirty(leaf);
4227 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4228 kfree(buf);
4229 return 0;
4233 * This function splits a single item into two items,
4234 * giving 'new_key' to the new item and splitting the
4235 * old one at split_offset (from the start of the item).
4237 * The path may be released by this operation. After
4238 * the split, the path is pointing to the old item. The
4239 * new item is going to be in the same node as the old one.
4241 * Note: the item being split must be small enough to live alone on
4242 * a tree block with room for one extra struct btrfs_item
4244 * This allows us to split the item in place, keeping a lock on the
4245 * leaf the entire time.
4247 int btrfs_split_item(struct btrfs_trans_handle *trans,
4248 struct btrfs_root *root,
4249 struct btrfs_path *path,
4250 struct btrfs_key *new_key,
4251 unsigned long split_offset)
4253 int ret;
4254 ret = setup_leaf_for_split(trans, root, path,
4255 sizeof(struct btrfs_item));
4256 if (ret)
4257 return ret;
4259 ret = split_item(trans, root, path, new_key, split_offset);
4260 return ret;
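/*
 * Illustrative sketch of a hypothetical caller, not actual btrfs code: it
 * shows the usual way btrfs_split_item() is driven.  The helper name
 * example_split_at(), the key passed in and the way new_offset is chosen are
 * invented for the example.  setup_leaf_for_split() above only accepts
 * extent data and csum items, so the key must be of one of those types, and
 * the item must be small enough for an in-place split.
 */
static int __maybe_unused example_split_at(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_key *key,
					   u64 new_offset,
					   unsigned long split_offset)
{
	struct btrfs_path *path;
	struct btrfs_key new_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* locate the item to split; cow = 1 because we will modify the leaf */
	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	/*
	 * the first split_offset bytes keep the original key; the tail of
	 * the item becomes a new item carrying new_key
	 */
	new_key.objectid = key->objectid;
	new_key.type = key->type;
	new_key.offset = new_offset;

	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
out:
	btrfs_free_path(path);
	return ret;
}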
4264 * This function duplicates an item, giving 'new_key' to the new item.
4265 * It guarantees both items live in the same tree leaf and the new item
4266 * is contiguous with the original item.
4268 * This allows us to split a file extent in place, keeping a lock on the
4269 * leaf the entire time.
4271 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4272 struct btrfs_root *root,
4273 struct btrfs_path *path,
4274 struct btrfs_key *new_key)
4276 struct extent_buffer *leaf;
4277 int ret;
4278 u32 item_size;
4280 leaf = path->nodes[0];
4281 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4282 ret = setup_leaf_for_split(trans, root, path,
4283 item_size + sizeof(struct btrfs_item));
4284 if (ret)
4285 return ret;
4287 path->slots[0]++;
4288 setup_items_for_insert(root, path, new_key, &item_size,
4289 item_size, item_size +
4290 sizeof(struct btrfs_item), 1);
4291 leaf = path->nodes[0];
4292 memcpy_extent_buffer(leaf,
4293 btrfs_item_ptr_offset(leaf, path->slots[0]),
4294 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4295 item_size);
4296 return 0;
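/*
 * Illustrative sketch of a hypothetical caller, not actual btrfs code:
 * duplicate the file extent item the path currently points at, giving the
 * copy a new key offset.  example_dup_extent() and new_offset are invented;
 * the path is assumed to come from btrfs_search_slot() with cow = 1, and
 * new_key must sort directly after the original key.
 */
static int __maybe_unused example_dup_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     u64 new_offset)
{
	struct btrfs_key key;
	struct btrfs_key new_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	new_key.objectid = key.objectid;
	new_key.type = key.type;
	new_key.offset = new_offset;

	/* may split the leaf first; the path stays locked on the result */
	ret = btrfs_duplicate_item(trans, root, path, &new_key);
	if (ret)
		return ret;

	/*
	 * path->slots[0] now points at the copy, which starts out identical
	 * to the original; a real caller would adjust its fields here
	 */
	btrfs_mark_buffer_dirty(path->nodes[0]);
	return 0;
}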
4300 * make the item pointed to by the path smaller. new_size indicates
4301 * how small to make it, and from_end tells us if we just chop bytes
4302 * off the end of the item or if we shift the item to chop bytes off
4303 * the front.
4305 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4306 u32 new_size, int from_end)
4308 int slot;
4309 struct extent_buffer *leaf;
4310 struct btrfs_item *item;
4311 u32 nritems;
4312 unsigned int data_end;
4313 unsigned int old_data_start;
4314 unsigned int old_size;
4315 unsigned int size_diff;
4316 int i;
4317 struct btrfs_map_token token;
4319 btrfs_init_map_token(&token);
4321 leaf = path->nodes[0];
4322 slot = path->slots[0];
4324 old_size = btrfs_item_size_nr(leaf, slot);
4325 if (old_size == new_size)
4326 return;
4328 nritems = btrfs_header_nritems(leaf);
4329 data_end = leaf_data_end(root, leaf);
4331 old_data_start = btrfs_item_offset_nr(leaf, slot);
4333 size_diff = old_size - new_size;
4335 BUG_ON(slot < 0);
4336 BUG_ON(slot >= nritems);
4339 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4341 /* first correct the data pointers */
4342 for (i = slot; i < nritems; i++) {
4343 u32 ioff;
4344 item = btrfs_item_nr(i);
4346 ioff = btrfs_token_item_offset(leaf, item, &token);
4347 btrfs_set_token_item_offset(leaf, item,
4348 ioff + size_diff, &token);
4351 /* shift the data */
4352 if (from_end) {
4353 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4354 data_end + size_diff, btrfs_leaf_data(leaf) +
4355 data_end, old_data_start + new_size - data_end);
4356 } else {
4357 struct btrfs_disk_key disk_key;
4358 u64 offset;
4360 btrfs_item_key(leaf, &disk_key, slot);
4362 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4363 unsigned long ptr;
4364 struct btrfs_file_extent_item *fi;
4366 fi = btrfs_item_ptr(leaf, slot,
4367 struct btrfs_file_extent_item);
4368 fi = (struct btrfs_file_extent_item *)(
4369 (unsigned long)fi - size_diff);
4371 if (btrfs_file_extent_type(leaf, fi) ==
4372 BTRFS_FILE_EXTENT_INLINE) {
4373 ptr = btrfs_item_ptr_offset(leaf, slot);
4374 memmove_extent_buffer(leaf, ptr,
4375 (unsigned long)fi,
4376 offsetof(struct btrfs_file_extent_item,
4377 disk_bytenr));
4381 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4382 data_end + size_diff, btrfs_leaf_data(leaf) +
4383 data_end, old_data_start - data_end);
4385 offset = btrfs_disk_key_offset(&disk_key);
4386 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4387 btrfs_set_item_key(leaf, &disk_key, slot);
4388 if (slot == 0)
4389 fixup_low_keys(root, path, &disk_key, 1);
4392 item = btrfs_item_nr(slot);
4393 btrfs_set_item_size(leaf, item, new_size);
4394 btrfs_mark_buffer_dirty(leaf);
4396 if (btrfs_leaf_free_space(root, leaf) < 0) {
4397 btrfs_print_leaf(root, leaf);
4398 BUG();
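/*
 * Illustrative sketch, not actual btrfs code: shrink the item at
 * path->slots[0] by chopping bytes off its end.  example_truncate_tail() is
 * invented; the leaf is assumed to be cow'ed and write locked.
 * btrfs_truncate_item() must never be asked to grow an item, hence the size
 * check before the call.
 */
static void __maybe_unused example_truncate_tail(struct btrfs_root *root,
						 struct btrfs_path *path,
						 u32 new_size)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	/* from_end = 1: keep the item's key, drop bytes from the tail */
	if (new_size < old_size)
		btrfs_truncate_item(root, path, new_size, 1);
}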
4403 * make the item pointed to by the path bigger; data_size is the number of bytes added.
4405 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4406 u32 data_size)
4408 int slot;
4409 struct extent_buffer *leaf;
4410 struct btrfs_item *item;
4411 u32 nritems;
4412 unsigned int data_end;
4413 unsigned int old_data;
4414 unsigned int old_size;
4415 int i;
4416 struct btrfs_map_token token;
4418 btrfs_init_map_token(&token);
4420 leaf = path->nodes[0];
4422 nritems = btrfs_header_nritems(leaf);
4423 data_end = leaf_data_end(root, leaf);
4425 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4426 btrfs_print_leaf(root, leaf);
4427 BUG();
4429 slot = path->slots[0];
4430 old_data = btrfs_item_end_nr(leaf, slot);
4432 BUG_ON(slot < 0);
4433 if (slot >= nritems) {
4434 btrfs_print_leaf(root, leaf);
4435 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4436 slot, nritems);
4437 BUG_ON(1);
4441 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4443 /* first correct the data pointers */
4444 for (i = slot; i < nritems; i++) {
4445 u32 ioff;
4446 item = btrfs_item_nr(i);
4448 ioff = btrfs_token_item_offset(leaf, item, &token);
4449 btrfs_set_token_item_offset(leaf, item,
4450 ioff - data_size, &token);
4453 /* shift the data */
4454 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4455 data_end - data_size, btrfs_leaf_data(leaf) +
4456 data_end, old_data - data_end);
4458 data_end = old_data;
4459 old_size = btrfs_item_size_nr(leaf, slot);
4460 item = btrfs_item_nr(slot);
4461 btrfs_set_item_size(leaf, item, old_size + data_size);
4462 btrfs_mark_buffer_dirty(leaf);
4464 if (btrfs_leaf_free_space(root, leaf) < 0) {
4465 btrfs_print_leaf(root, leaf);
4466 BUG();
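/*
 * Illustrative sketch, not actual btrfs code: grow the item at
 * path->slots[0] by len bytes and write new data into the added tail.
 * example_append_to_item() is invented; the caller is assumed to have
 * reserved the extra leaf space already (for instance by searching with a
 * non-zero ins_len), since btrfs_extend_item() BUGs when the leaf is full.
 */
static void __maybe_unused example_append_to_item(struct btrfs_root *root,
						  struct btrfs_path *path,
						  const void *data, u32 len)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(root, path, len);

	/* the added bytes sit at the end of the (relocated) item data */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr + old_size, len);
	btrfs_mark_buffer_dirty(leaf);
}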
4471 * this is a helper for btrfs_insert_empty_items, the main goal here is
4472 * to save stack depth by doing the bulk of the work in a function
4473 * that doesn't call btrfs_search_slot
4475 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4476 struct btrfs_key *cpu_key, u32 *data_size,
4477 u32 total_data, u32 total_size, int nr)
4479 struct btrfs_item *item;
4480 int i;
4481 u32 nritems;
4482 unsigned int data_end;
4483 struct btrfs_disk_key disk_key;
4484 struct extent_buffer *leaf;
4485 int slot;
4486 struct btrfs_map_token token;
4488 btrfs_init_map_token(&token);
4490 leaf = path->nodes[0];
4491 slot = path->slots[0];
4493 nritems = btrfs_header_nritems(leaf);
4494 data_end = leaf_data_end(root, leaf);
4496 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4497 btrfs_print_leaf(root, leaf);
4498 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4499 total_size, btrfs_leaf_free_space(root, leaf));
4500 BUG();
4503 if (slot != nritems) {
4504 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4506 if (old_data < data_end) {
4507 btrfs_print_leaf(root, leaf);
4508 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4509 slot, old_data, data_end);
4510 BUG_ON(1);
4513 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4515 /* first correct the data pointers */
4516 for (i = slot; i < nritems; i++) {
4517 u32 ioff;
4519 item = btrfs_item_nr(i);
4520 ioff = btrfs_token_item_offset(leaf, item, &token);
4521 btrfs_set_token_item_offset(leaf, item,
4522 ioff - total_data, &token);
4524 /* shift the items */
4525 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4526 btrfs_item_nr_offset(slot),
4527 (nritems - slot) * sizeof(struct btrfs_item));
4529 /* shift the data */
4530 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4531 data_end - total_data, btrfs_leaf_data(leaf) +
4532 data_end, old_data - data_end);
4533 data_end = old_data;
4536 /* setup the item for the new data */
4537 for (i = 0; i < nr; i++) {
4538 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4539 btrfs_set_item_key(leaf, &disk_key, slot + i);
4540 item = btrfs_item_nr(slot + i);
4541 btrfs_set_token_item_offset(leaf, item,
4542 data_end - data_size[i], &token);
4543 data_end -= data_size[i];
4544 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4547 btrfs_set_header_nritems(leaf, nritems + nr);
4549 if (slot == 0) {
4550 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4551 fixup_low_keys(root, path, &disk_key, 1);
4553 btrfs_unlock_up_safe(path, 1);
4554 btrfs_mark_buffer_dirty(leaf);
4556 if (btrfs_leaf_free_space(root, leaf) < 0) {
4557 btrfs_print_leaf(root, leaf);
4558 BUG();
4563 * Given a key and some data, insert items into the tree.
4564 * This does all the path init required, making room in the tree if needed.
4566 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4567 struct btrfs_root *root,
4568 struct btrfs_path *path,
4569 struct btrfs_key *cpu_key, u32 *data_size,
4570 int nr)
4572 int ret = 0;
4573 int slot;
4574 int i;
4575 u32 total_size = 0;
4576 u32 total_data = 0;
4578 for (i = 0; i < nr; i++)
4579 total_data += data_size[i];
4581 total_size = total_data + (nr * sizeof(struct btrfs_item));
4582 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4583 if (ret == 0)
4584 return -EEXIST;
4585 if (ret < 0)
4586 return ret;
4588 slot = path->slots[0];
4589 BUG_ON(slot < 0);
4591 setup_items_for_insert(root, path, cpu_key, data_size,
4592 total_data, total_size, nr);
4593 return 0;
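/*
 * Illustrative sketch, not actual btrfs code: insert two new items with a
 * single leaf reservation and then fill in their data.
 * example_insert_pair() is invented; keys[] must be in ascending key order,
 * must not already exist, and data[i] must hold sizes[i] bytes.
 */
static int __maybe_unused example_insert_pair(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_key *keys,
					      void **data, u32 *sizes)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	/* path->slots[0] is the first of the two freshly reserved items */
	leaf = path->nodes[0];
	for (i = 0; i < 2; i++)
		write_extent_buffer(leaf, data[i],
				    btrfs_item_ptr_offset(leaf,
							  path->slots[0] + i),
				    sizes[i]);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}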
4597 * Given a key and some data, insert an item into the tree.
4598 * This does all the path init required, making room in the tree if needed.
4600 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4601 *root, struct btrfs_key *cpu_key, void *data, u32
4602 data_size)
4604 int ret = 0;
4605 struct btrfs_path *path;
4606 struct extent_buffer *leaf;
4607 unsigned long ptr;
4609 path = btrfs_alloc_path();
4610 if (!path)
4611 return -ENOMEM;
4612 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4613 if (!ret) {
4614 leaf = path->nodes[0];
4615 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4616 write_extent_buffer(leaf, data, ptr, data_size);
4617 btrfs_mark_buffer_dirty(leaf);
4619 btrfs_free_path(path);
4620 return ret;
4624 * delete the pointer from a given node.
4626 * the tree should have been previously balanced so the deletion does not
4627 * empty a node.
4629 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4630 int level, int slot)
4632 struct extent_buffer *parent = path->nodes[level];
4633 u32 nritems;
4634 int ret;
4636 nritems = btrfs_header_nritems(parent);
4637 if (slot != nritems - 1) {
4638 if (level)
4639 tree_mod_log_eb_move(root->fs_info, parent, slot,
4640 slot + 1, nritems - slot - 1);
4641 memmove_extent_buffer(parent,
4642 btrfs_node_key_ptr_offset(slot),
4643 btrfs_node_key_ptr_offset(slot + 1),
4644 sizeof(struct btrfs_key_ptr) *
4645 (nritems - slot - 1));
4646 } else if (level) {
4647 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4648 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4649 BUG_ON(ret < 0);
4652 nritems--;
4653 btrfs_set_header_nritems(parent, nritems);
4654 if (nritems == 0 && parent == root->node) {
4655 BUG_ON(btrfs_header_level(root->node) != 1);
4656 /* just turn the root into a leaf and break */
4657 btrfs_set_header_level(root->node, 0);
4658 } else if (slot == 0) {
4659 struct btrfs_disk_key disk_key;
4661 btrfs_node_key(parent, &disk_key, 0);
4662 fixup_low_keys(root, path, &disk_key, level + 1);
4664 btrfs_mark_buffer_dirty(parent);
4668 * a helper function to delete the leaf pointed to by path->slots[1] and
4669 * path->nodes[1].
4671 * This deletes the pointer in path->nodes[1] and frees the leaf
4672 * block extent.
4674 * The path must have already been set up for deleting the leaf, including
4675 * all the proper balancing. path->nodes[1] must be locked.
4677 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4678 struct btrfs_root *root,
4679 struct btrfs_path *path,
4680 struct extent_buffer *leaf)
4682 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4683 del_ptr(root, path, 1, path->slots[1]);
4686 * btrfs_free_extent is expensive, we want to make sure we
4687 * aren't holding any locks when we call it
4689 btrfs_unlock_up_safe(path, 0);
4691 root_sub_used(root, leaf->len);
4693 extent_buffer_get(leaf);
4694 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4695 free_extent_buffer_stale(leaf);
4698 * delete 'nr' items starting at 'slot' from the leaf in the path. If that
4699 * empties the leaf, remove it from the tree
4701 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4702 struct btrfs_path *path, int slot, int nr)
4704 struct extent_buffer *leaf;
4705 struct btrfs_item *item;
4706 int last_off;
4707 int dsize = 0;
4708 int ret = 0;
4709 int wret;
4710 int i;
4711 u32 nritems;
4712 struct btrfs_map_token token;
4714 btrfs_init_map_token(&token);
4716 leaf = path->nodes[0];
4717 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4719 for (i = 0; i < nr; i++)
4720 dsize += btrfs_item_size_nr(leaf, slot + i);
4722 nritems = btrfs_header_nritems(leaf);
4724 if (slot + nr != nritems) {
4725 int data_end = leaf_data_end(root, leaf);
4727 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4728 data_end + dsize,
4729 btrfs_leaf_data(leaf) + data_end,
4730 last_off - data_end);
4732 for (i = slot + nr; i < nritems; i++) {
4733 u32 ioff;
4735 item = btrfs_item_nr(i);
4736 ioff = btrfs_token_item_offset(leaf, item, &token);
4737 btrfs_set_token_item_offset(leaf, item,
4738 ioff + dsize, &token);
4741 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4742 btrfs_item_nr_offset(slot + nr),
4743 sizeof(struct btrfs_item) *
4744 (nritems - slot - nr));
4746 btrfs_set_header_nritems(leaf, nritems - nr);
4747 nritems -= nr;
4749 /* delete the leaf if we've emptied it */
4750 if (nritems == 0) {
4751 if (leaf == root->node) {
4752 btrfs_set_header_level(leaf, 0);
4753 } else {
4754 btrfs_set_path_blocking(path);
4755 clean_tree_block(trans, root, leaf);
4756 btrfs_del_leaf(trans, root, path, leaf);
4758 } else {
4759 int used = leaf_space_used(leaf, 0, nritems);
4760 if (slot == 0) {
4761 struct btrfs_disk_key disk_key;
4763 btrfs_item_key(leaf, &disk_key, 0);
4764 fixup_low_keys(root, path, &disk_key, 1);
4767 /* delete the leaf if it is mostly empty */
4768 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4769 /* push_leaf_left fixes the path.
4770 * make sure the path still points to our leaf
4771 * for possible call to del_ptr below
4773 slot = path->slots[1];
4774 extent_buffer_get(leaf);
4776 btrfs_set_path_blocking(path);
4777 wret = push_leaf_left(trans, root, path, 1, 1,
4778 1, (u32)-1);
4779 if (wret < 0 && wret != -ENOSPC)
4780 ret = wret;
4782 if (path->nodes[0] == leaf &&
4783 btrfs_header_nritems(leaf)) {
4784 wret = push_leaf_right(trans, root, path, 1,
4785 1, 1, 0);
4786 if (wret < 0 && wret != -ENOSPC)
4787 ret = wret;
4790 if (btrfs_header_nritems(leaf) == 0) {
4791 path->slots[1] = slot;
4792 btrfs_del_leaf(trans, root, path, leaf);
4793 free_extent_buffer(leaf);
4794 ret = 0;
4795 } else {
4796 /* if we're still in the path, make sure
4797 * we're dirty. Otherwise, one of the
4798 * push_leaf functions must have already
4799 * dirtied this buffer
4801 if (path->nodes[0] == leaf)
4802 btrfs_mark_buffer_dirty(leaf);
4803 free_extent_buffer(leaf);
4805 } else {
4806 btrfs_mark_buffer_dirty(leaf);
4809 return ret;
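/*
 * Illustrative sketch, not actual btrfs code: look an item up by key and
 * delete it.  example_delete_by_key() is invented; passing ins_len = -1 to
 * btrfs_search_slot() tells the search that a deletion follows, so nodes
 * are rebalanced on the way down.
 */
static int __maybe_unused example_delete_by_key(struct btrfs_trans_handle *trans,
						struct btrfs_root *root,
						struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);

	btrfs_free_path(path);
	return ret;
}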
4813 * search the tree again to find a leaf with lesser keys
4814 * returns 0 if it found something or 1 if there are no lesser leaves.
4815 * returns < 0 on io errors.
4817 * This may release the path, and so you may lose any locks held at the
4818 * time you call it.
4820 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4822 struct btrfs_key key;
4823 struct btrfs_disk_key found_key;
4824 int ret;
4826 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4828 if (key.offset > 0) {
4829 key.offset--;
4830 } else if (key.type > 0) {
4831 key.type--;
4832 key.offset = (u64)-1;
4833 } else if (key.objectid > 0) {
4834 key.objectid--;
4835 key.type = (u8)-1;
4836 key.offset = (u64)-1;
4837 } else {
4838 return 1;
4841 btrfs_release_path(path);
4842 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4843 if (ret < 0)
4844 return ret;
4845 btrfs_item_key(path->nodes[0], &found_key, 0);
4846 ret = comp_keys(&found_key, &key);
4847 if (ret < 0)
4848 return 0;
4849 return 1;
4853 * A helper function to walk down the tree starting at min_key, and looking
4854 * for nodes or leaves that have a minimum transaction id.
4855 * This is used by the btree defrag code and tree logging
4857 * This does not cow, but it does stuff the starting key it finds back
4858 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4859 * key and get a writable path.
4861 * This does lock as it descends, and path->keep_locks should be set
4862 * to 1 by the caller.
4864 * This honors path->lowest_level to prevent descent past a given level
4865 * of the tree.
4867 * min_trans indicates the oldest transaction that you are interested
4868 * in walking through. Any nodes or leaves older than min_trans are
4869 * skipped over (without reading them).
4871 * returns zero if something useful was found, < 0 on error and 1 if there
4872 * was nothing in the tree that matched the search criteria.
4874 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4875 struct btrfs_path *path,
4876 u64 min_trans)
4878 struct extent_buffer *cur;
4879 struct btrfs_key found_key;
4880 int slot;
4881 int sret;
4882 u32 nritems;
4883 int level;
4884 int ret = 1;
4886 WARN_ON(!path->keep_locks);
4887 again:
4888 cur = btrfs_read_lock_root_node(root);
4889 level = btrfs_header_level(cur);
4890 WARN_ON(path->nodes[level]);
4891 path->nodes[level] = cur;
4892 path->locks[level] = BTRFS_READ_LOCK;
4894 if (btrfs_header_generation(cur) < min_trans) {
4895 ret = 1;
4896 goto out;
4898 while (1) {
4899 nritems = btrfs_header_nritems(cur);
4900 level = btrfs_header_level(cur);
4901 sret = bin_search(cur, min_key, level, &slot);
4903 /* at the lowest level, we're done; set up the path and exit */
4904 if (level == path->lowest_level) {
4905 if (slot >= nritems)
4906 goto find_next_key;
4907 ret = 0;
4908 path->slots[level] = slot;
4909 btrfs_item_key_to_cpu(cur, &found_key, slot);
4910 goto out;
4912 if (sret && slot > 0)
4913 slot--;
4915 * check this node pointer against the min_trans parameter.
4916 * If it is too old, skip to the next one.
4918 while (slot < nritems) {
4919 u64 gen;
4921 gen = btrfs_node_ptr_generation(cur, slot);
4922 if (gen < min_trans) {
4923 slot++;
4924 continue;
4926 break;
4928 find_next_key:
4930 * we didn't find a candidate key in this node, walk forward
4931 * and find another one
4933 if (slot >= nritems) {
4934 path->slots[level] = slot;
4935 btrfs_set_path_blocking(path);
4936 sret = btrfs_find_next_key(root, path, min_key, level,
4937 min_trans);
4938 if (sret == 0) {
4939 btrfs_release_path(path);
4940 goto again;
4941 } else {
4942 goto out;
4945 /* save our key for returning back */
4946 btrfs_node_key_to_cpu(cur, &found_key, slot);
4947 path->slots[level] = slot;
4948 if (level == path->lowest_level) {
4949 ret = 0;
4950 unlock_up(path, level, 1, 0, NULL);
4951 goto out;
4953 btrfs_set_path_blocking(path);
4954 cur = read_node_slot(root, cur, slot);
4955 BUG_ON(!cur); /* -ENOMEM */
4957 btrfs_tree_read_lock(cur);
4959 path->locks[level - 1] = BTRFS_READ_LOCK;
4960 path->nodes[level - 1] = cur;
4961 unlock_up(path, level, 1, 0, NULL);
4962 btrfs_clear_path_blocking(path, NULL, 0);
4964 out:
4965 if (ret == 0)
4966 memcpy(min_key, &found_key, sizeof(found_key));
4967 btrfs_set_path_blocking(path);
4968 return ret;
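/*
 * Illustrative sketch, not actual btrfs code: visit every leaf item that
 * lives in a block at least as new as min_trans, the way the defrag and
 * tree-log code drive btrfs_search_forward().  example_visit_newer() is
 * invented, and the key advance at the bottom is a deliberately naive
 * "move one key forward"; real callers advance according to what they
 * processed.
 */
static int __maybe_unused example_visit_newer(struct btrfs_root *root,
					      u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* btrfs_search_forward() insists on keep_locks (see the WARN_ON) */
	path->keep_locks = 1;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret)	/* 1: nothing new enough left, < 0: error */
			break;

		/*
		 * min_key now names an item in a block at least as new as
		 * min_trans; inspect path->nodes[0] / path->slots[0] here
		 */
		btrfs_release_path(path);

		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}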
4971 static void tree_move_down(struct btrfs_root *root,
4972 struct btrfs_path *path,
4973 int *level, int root_level)
4975 BUG_ON(*level == 0);
4976 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4977 path->slots[*level]);
4978 path->slots[*level - 1] = 0;
4979 (*level)--;
4982 static int tree_move_next_or_upnext(struct btrfs_root *root,
4983 struct btrfs_path *path,
4984 int *level, int root_level)
4986 int ret = 0;
4987 int nritems;
4988 nritems = btrfs_header_nritems(path->nodes[*level]);
4990 path->slots[*level]++;
4992 while (path->slots[*level] >= nritems) {
4993 if (*level == root_level)
4994 return -1;
4996 /* move upnext */
4997 path->slots[*level] = 0;
4998 free_extent_buffer(path->nodes[*level]);
4999 path->nodes[*level] = NULL;
5000 (*level)++;
5001 path->slots[*level]++;
5003 nritems = btrfs_header_nritems(path->nodes[*level]);
5004 ret = 1;
5006 return ret;
5010 * Returns 1 if it had to move up and next, 0 if it moved only next or down,
5011 * and < 0 if it could not advance because the end of the tree was reached.
5013 static int tree_advance(struct btrfs_root *root,
5014 struct btrfs_path *path,
5015 int *level, int root_level,
5016 int allow_down,
5017 struct btrfs_key *key)
5019 int ret;
5021 if (*level == 0 || !allow_down) {
5022 ret = tree_move_next_or_upnext(root, path, level, root_level);
5023 } else {
5024 tree_move_down(root, path, level, root_level);
5025 ret = 0;
5027 if (ret >= 0) {
5028 if (*level == 0)
5029 btrfs_item_key_to_cpu(path->nodes[*level], key,
5030 path->slots[*level]);
5031 else
5032 btrfs_node_key_to_cpu(path->nodes[*level], key,
5033 path->slots[*level]);
5035 return ret;
5038 static int tree_compare_item(struct btrfs_root *left_root,
5039 struct btrfs_path *left_path,
5040 struct btrfs_path *right_path,
5041 char *tmp_buf)
5043 int cmp;
5044 int len1, len2;
5045 unsigned long off1, off2;
5047 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5048 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5049 if (len1 != len2)
5050 return 1;
5052 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5053 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5054 right_path->slots[0]);
5056 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5058 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5059 if (cmp)
5060 return 1;
5061 return 0;
5064 #define ADVANCE 1
5065 #define ADVANCE_ONLY_NEXT -1
5068 * This function compares two trees and calls the provided callback for
5069 * every changed/new/deleted item it finds.
5070 * If shared tree blocks are encountered, whole subtrees are skipped, making
5071 * the compare pretty fast on snapshotted subvolumes.
5073 * This currently works on commit roots only. As commit roots are read only,
5074 * we don't do any locking. The commit roots are protected with transactions.
5075 * Transactions are ended and rejoined when a commit is tried in between.
5077 * This function checks for modifications done to the trees while comparing.
5078 * If it detects a change, it aborts immediately.
5080 int btrfs_compare_trees(struct btrfs_root *left_root,
5081 struct btrfs_root *right_root,
5082 btrfs_changed_cb_t changed_cb, void *ctx)
5084 int ret;
5085 int cmp;
5086 struct btrfs_trans_handle *trans = NULL;
5087 struct btrfs_path *left_path = NULL;
5088 struct btrfs_path *right_path = NULL;
5089 struct btrfs_key left_key;
5090 struct btrfs_key right_key;
5091 char *tmp_buf = NULL;
5092 int left_root_level;
5093 int right_root_level;
5094 int left_level;
5095 int right_level;
5096 int left_end_reached;
5097 int right_end_reached;
5098 int advance_left;
5099 int advance_right;
5100 u64 left_blockptr;
5101 u64 right_blockptr;
5102 u64 left_start_ctransid;
5103 u64 right_start_ctransid;
5104 u64 ctransid;
5106 left_path = btrfs_alloc_path();
5107 if (!left_path) {
5108 ret = -ENOMEM;
5109 goto out;
5111 right_path = btrfs_alloc_path();
5112 if (!right_path) {
5113 ret = -ENOMEM;
5114 goto out;
5117 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5118 if (!tmp_buf) {
5119 ret = -ENOMEM;
5120 goto out;
5123 left_path->search_commit_root = 1;
5124 left_path->skip_locking = 1;
5125 right_path->search_commit_root = 1;
5126 right_path->skip_locking = 1;
5128 spin_lock(&left_root->root_item_lock);
5129 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5130 spin_unlock(&left_root->root_item_lock);
5132 spin_lock(&right_root->root_item_lock);
5133 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5134 spin_unlock(&right_root->root_item_lock);
5136 trans = btrfs_join_transaction(left_root);
5137 if (IS_ERR(trans)) {
5138 ret = PTR_ERR(trans);
5139 trans = NULL;
5140 goto out;
5144 * Strategy: Go to the first items of both trees. Then do
5146 * If both trees are at level 0
5147 * Compare keys of current items
5148 * If left < right treat left item as new, advance left tree
5149 * and repeat
5150 * If left > right treat right item as deleted, advance right tree
5151 * and repeat
5152 * If left == right do deep compare of items, treat as changed if
5153 * needed, advance both trees and repeat
5154 * If both trees are at the same level but not at level 0
5155 * Compare keys of current nodes/leaves
5156 * If left < right advance left tree and repeat
5157 * If left > right advance right tree and repeat
5158 * If left == right compare blockptrs of the next nodes/leaves
5159 * If they match advance both trees but stay at the same level
5160 * and repeat
5161 * If they don't match advance both trees while allowing to go
5162 * deeper and repeat
5163 * If tree levels are different
5164 * Advance the tree that needs it and repeat
5166 * Advancing a tree means:
5167 * If we are at level 0, try to go to the next slot. If that's not
5168 * possible, go one level up and repeat. Stop when we found a level
5169 * where we could go to the next slot. We may at this point be on a
5170 * node or a leaf.
5172 * If we are not at level 0 and not on shared tree blocks, go one
5173 * level deeper.
5175 * If we are not at level 0 and on shared tree blocks, go one slot to
5176 * the right if possible or go up and right.
5179 left_level = btrfs_header_level(left_root->commit_root);
5180 left_root_level = left_level;
5181 left_path->nodes[left_level] = left_root->commit_root;
5182 extent_buffer_get(left_path->nodes[left_level]);
5184 right_level = btrfs_header_level(right_root->commit_root);
5185 right_root_level = right_level;
5186 right_path->nodes[right_level] = right_root->commit_root;
5187 extent_buffer_get(right_path->nodes[right_level]);
5189 if (left_level == 0)
5190 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5191 &left_key, left_path->slots[left_level]);
5192 else
5193 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5194 &left_key, left_path->slots[left_level]);
5195 if (right_level == 0)
5196 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5197 &right_key, right_path->slots[right_level]);
5198 else
5199 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5200 &right_key, right_path->slots[right_level]);
5202 left_end_reached = right_end_reached = 0;
5203 advance_left = advance_right = 0;
5205 while (1) {
5207 * We need to make sure the transaction does not get committed
5209 * while we do anything on commit roots. This means we need to
5209 * join and leave transactions for every item that we process.
5211 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5212 btrfs_release_path(left_path);
5213 btrfs_release_path(right_path);
5215 ret = btrfs_end_transaction(trans, left_root);
5216 trans = NULL;
5217 if (ret < 0)
5218 goto out;
5220 /* now rejoin the transaction */
5221 if (!trans) {
5222 trans = btrfs_join_transaction(left_root);
5223 if (IS_ERR(trans)) {
5224 ret = PTR_ERR(trans);
5225 trans = NULL;
5226 goto out;
5229 spin_lock(&left_root->root_item_lock);
5230 ctransid = btrfs_root_ctransid(&left_root->root_item);
5231 spin_unlock(&left_root->root_item_lock);
5232 if (ctransid != left_start_ctransid)
5233 left_start_ctransid = 0;
5235 spin_lock(&right_root->root_item_lock);
5236 ctransid = btrfs_root_ctransid(&right_root->root_item);
5237 spin_unlock(&right_root->root_item_lock);
5238 if (ctransid != right_start_ctransid)
5239 right_start_ctransid = 0;
5241 if (!left_start_ctransid || !right_start_ctransid) {
5242 WARN(1, KERN_WARNING
5243 "btrfs: btrfs_compare_tree detected "
5244 "a change in one of the trees while "
5245 "iterating. This is probably a "
5246 "bug.\n");
5247 ret = -EIO;
5248 goto out;
5252 * the commit root may have changed, so start again
5253 * where we stopped
5255 left_path->lowest_level = left_level;
5256 right_path->lowest_level = right_level;
5257 ret = btrfs_search_slot(NULL, left_root,
5258 &left_key, left_path, 0, 0);
5259 if (ret < 0)
5260 goto out;
5261 ret = btrfs_search_slot(NULL, right_root,
5262 &right_key, right_path, 0, 0);
5263 if (ret < 0)
5264 goto out;
5267 if (advance_left && !left_end_reached) {
5268 ret = tree_advance(left_root, left_path, &left_level,
5269 left_root_level,
5270 advance_left != ADVANCE_ONLY_NEXT,
5271 &left_key);
5272 if (ret < 0)
5273 left_end_reached = ADVANCE;
5274 advance_left = 0;
5276 if (advance_right && !right_end_reached) {
5277 ret = tree_advance(right_root, right_path, &right_level,
5278 right_root_level,
5279 advance_right != ADVANCE_ONLY_NEXT,
5280 &right_key);
5281 if (ret < 0)
5282 right_end_reached = ADVANCE;
5283 advance_right = 0;
5286 if (left_end_reached && right_end_reached) {
5287 ret = 0;
5288 goto out;
5289 } else if (left_end_reached) {
5290 if (right_level == 0) {
5291 ret = changed_cb(left_root, right_root,
5292 left_path, right_path,
5293 &right_key,
5294 BTRFS_COMPARE_TREE_DELETED,
5295 ctx);
5296 if (ret < 0)
5297 goto out;
5299 advance_right = ADVANCE;
5300 continue;
5301 } else if (right_end_reached) {
5302 if (left_level == 0) {
5303 ret = changed_cb(left_root, right_root,
5304 left_path, right_path,
5305 &left_key,
5306 BTRFS_COMPARE_TREE_NEW,
5307 ctx);
5308 if (ret < 0)
5309 goto out;
5311 advance_left = ADVANCE;
5312 continue;
5315 if (left_level == 0 && right_level == 0) {
5316 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5317 if (cmp < 0) {
5318 ret = changed_cb(left_root, right_root,
5319 left_path, right_path,
5320 &left_key,
5321 BTRFS_COMPARE_TREE_NEW,
5322 ctx);
5323 if (ret < 0)
5324 goto out;
5325 advance_left = ADVANCE;
5326 } else if (cmp > 0) {
5327 ret = changed_cb(left_root, right_root,
5328 left_path, right_path,
5329 &right_key,
5330 BTRFS_COMPARE_TREE_DELETED,
5331 ctx);
5332 if (ret < 0)
5333 goto out;
5334 advance_right = ADVANCE;
5335 } else {
5336 enum btrfs_compare_tree_result cmp;
5338 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5339 ret = tree_compare_item(left_root, left_path,
5340 right_path, tmp_buf);
5341 if (ret)
5342 cmp = BTRFS_COMPARE_TREE_CHANGED;
5343 else
5344 cmp = BTRFS_COMPARE_TREE_SAME;
5345 ret = changed_cb(left_root, right_root,
5346 left_path, right_path,
5347 &left_key, cmp, ctx);
5348 if (ret < 0)
5349 goto out;
5350 advance_left = ADVANCE;
5351 advance_right = ADVANCE;
5353 } else if (left_level == right_level) {
5354 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5355 if (cmp < 0) {
5356 advance_left = ADVANCE;
5357 } else if (cmp > 0) {
5358 advance_right = ADVANCE;
5359 } else {
5360 left_blockptr = btrfs_node_blockptr(
5361 left_path->nodes[left_level],
5362 left_path->slots[left_level]);
5363 right_blockptr = btrfs_node_blockptr(
5364 right_path->nodes[right_level],
5365 right_path->slots[right_level]);
5366 if (left_blockptr == right_blockptr) {
5368 * As we're on a shared block, don't
5369 * descend any deeper.
5371 advance_left = ADVANCE_ONLY_NEXT;
5372 advance_right = ADVANCE_ONLY_NEXT;
5373 } else {
5374 advance_left = ADVANCE;
5375 advance_right = ADVANCE;
5378 } else if (left_level < right_level) {
5379 advance_right = ADVANCE;
5380 } else {
5381 advance_left = ADVANCE;
5385 out:
5386 btrfs_free_path(left_path);
5387 btrfs_free_path(right_path);
5388 kfree(tmp_buf);
5390 if (trans) {
5391 if (!ret)
5392 ret = btrfs_end_transaction(trans, left_root);
5393 else
5394 btrfs_end_transaction(trans, left_root);
5397 return ret;
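/*
 * Illustrative sketch, not actual btrfs code: a minimal changed_cb plus a
 * driver for btrfs_compare_trees().  The callback signature is inferred
 * from the call sites above (the send code is the real user of this
 * interface); example_changed_cb() and example_count_changes() are
 * invented, and the context pointer is just a counter here.
 */
static int __maybe_unused example_changed_cb(struct btrfs_root *left_root,
					     struct btrfs_root *right_root,
					     struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	u64 *count = ctx;

	/* count every item that differs between the two commit roots */
	if (result != BTRFS_COMPARE_TREE_SAME)
		(*count)++;

	return 0;	/* a negative return would abort the compare */
}

static int __maybe_unused example_count_changes(struct btrfs_root *left_root,
						struct btrfs_root *right_root,
						u64 *count)
{
	*count = 0;
	return btrfs_compare_trees(left_root, right_root,
				   example_changed_cb, count);
}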
5401 * this is similar to btrfs_next_leaf, but does not try to preserve
5402 * and fix up the path. It looks for and returns the next key in the
5403 * tree based on the current path and the min_trans parameter.
5405 * 0 is returned if another key is found, < 0 if there are any errors
5406 * and 1 is returned if there are no higher keys in the tree
5408 * path->keep_locks should be set to 1 on the search made before
5409 * calling this function.
5411 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5412 struct btrfs_key *key, int level, u64 min_trans)
5414 int slot;
5415 struct extent_buffer *c;
5417 WARN_ON(!path->keep_locks);
5418 while (level < BTRFS_MAX_LEVEL) {
5419 if (!path->nodes[level])
5420 return 1;
5422 slot = path->slots[level] + 1;
5423 c = path->nodes[level];
5424 next:
5425 if (slot >= btrfs_header_nritems(c)) {
5426 int ret;
5427 int orig_lowest;
5428 struct btrfs_key cur_key;
5429 if (level + 1 >= BTRFS_MAX_LEVEL ||
5430 !path->nodes[level + 1])
5431 return 1;
5433 if (path->locks[level + 1]) {
5434 level++;
5435 continue;
5438 slot = btrfs_header_nritems(c) - 1;
5439 if (level == 0)
5440 btrfs_item_key_to_cpu(c, &cur_key, slot);
5441 else
5442 btrfs_node_key_to_cpu(c, &cur_key, slot);
5444 orig_lowest = path->lowest_level;
5445 btrfs_release_path(path);
5446 path->lowest_level = level;
5447 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5448 0, 0);
5449 path->lowest_level = orig_lowest;
5450 if (ret < 0)
5451 return ret;
5453 c = path->nodes[level];
5454 slot = path->slots[level];
5455 if (ret == 0)
5456 slot++;
5457 goto next;
5460 if (level == 0)
5461 btrfs_item_key_to_cpu(c, key, slot);
5462 else {
5463 u64 gen = btrfs_node_ptr_generation(c, slot);
5465 if (gen < min_trans) {
5466 slot++;
5467 goto next;
5469 btrfs_node_key_to_cpu(c, key, slot);
5471 return 0;
5473 return 1;
5477 * search the tree again to find a leaf with greater keys
5478 * returns 0 if it found something or 1 if there are no greater leaves.
5479 * returns < 0 on io errors.
5481 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5483 return btrfs_next_old_leaf(root, path, 0);
5486 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5487 u64 time_seq)
5489 int slot;
5490 int level;
5491 struct extent_buffer *c;
5492 struct extent_buffer *next;
5493 struct btrfs_key key;
5494 u32 nritems;
5495 int ret;
5496 int old_spinning = path->leave_spinning;
5497 int next_rw_lock = 0;
5499 nritems = btrfs_header_nritems(path->nodes[0]);
5500 if (nritems == 0)
5501 return 1;
5503 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5504 again:
5505 level = 1;
5506 next = NULL;
5507 next_rw_lock = 0;
5508 btrfs_release_path(path);
5510 path->keep_locks = 1;
5511 path->leave_spinning = 1;
5513 if (time_seq)
5514 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5515 else
5516 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5517 path->keep_locks = 0;
5519 if (ret < 0)
5520 return ret;
5522 nritems = btrfs_header_nritems(path->nodes[0]);
5524 * by releasing the path above we dropped all our locks. A balance
5525 * could have added more items next to the key that used to be
5526 * at the very end of the block. So, check again here and
5527 * advance the path if there are now more items available.
5529 if (nritems > 0 && path->slots[0] < nritems - 1) {
5530 if (ret == 0)
5531 path->slots[0]++;
5532 ret = 0;
5533 goto done;
5536 while (level < BTRFS_MAX_LEVEL) {
5537 if (!path->nodes[level]) {
5538 ret = 1;
5539 goto done;
5542 slot = path->slots[level] + 1;
5543 c = path->nodes[level];
5544 if (slot >= btrfs_header_nritems(c)) {
5545 level++;
5546 if (level == BTRFS_MAX_LEVEL) {
5547 ret = 1;
5548 goto done;
5550 continue;
5553 if (next) {
5554 btrfs_tree_unlock_rw(next, next_rw_lock);
5555 free_extent_buffer(next);
5558 next = c;
5559 next_rw_lock = path->locks[level];
5560 ret = read_block_for_search(NULL, root, path, &next, level,
5561 slot, &key, 0);
5562 if (ret == -EAGAIN)
5563 goto again;
5565 if (ret < 0) {
5566 btrfs_release_path(path);
5567 goto done;
5570 if (!path->skip_locking) {
5571 ret = btrfs_try_tree_read_lock(next);
5572 if (!ret && time_seq) {
5574 * If we don't get the lock, we may be racing
5575 * with push_leaf_left, which holds that lock
5576 * while waiting for the leaf we currently have
5577 * locked. To resolve this, we give up our lock
5578 * and cycle.
5580 free_extent_buffer(next);
5581 btrfs_release_path(path);
5582 cond_resched();
5583 goto again;
5585 if (!ret) {
5586 btrfs_set_path_blocking(path);
5587 btrfs_tree_read_lock(next);
5588 btrfs_clear_path_blocking(path, next,
5589 BTRFS_READ_LOCK);
5591 next_rw_lock = BTRFS_READ_LOCK;
5593 break;
5595 path->slots[level] = slot;
5596 while (1) {
5597 level--;
5598 c = path->nodes[level];
5599 if (path->locks[level])
5600 btrfs_tree_unlock_rw(c, path->locks[level]);
5602 free_extent_buffer(c);
5603 path->nodes[level] = next;
5604 path->slots[level] = 0;
5605 if (!path->skip_locking)
5606 path->locks[level] = next_rw_lock;
5607 if (!level)
5608 break;
5610 ret = read_block_for_search(NULL, root, path, &next, level,
5611 0, &key, 0);
5612 if (ret == -EAGAIN)
5613 goto again;
5615 if (ret < 0) {
5616 btrfs_release_path(path);
5617 goto done;
5620 if (!path->skip_locking) {
5621 ret = btrfs_try_tree_read_lock(next);
5622 if (!ret) {
5623 btrfs_set_path_blocking(path);
5624 btrfs_tree_read_lock(next);
5625 btrfs_clear_path_blocking(path, next,
5626 BTRFS_READ_LOCK);
5628 next_rw_lock = BTRFS_READ_LOCK;
5631 ret = 0;
5632 done:
5633 unlock_up(path, 0, 1, 0, NULL);
5634 path->leave_spinning = old_spinning;
5635 if (!old_spinning)
5636 btrfs_set_path_blocking(path);
5638 return ret;
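/*
 * Illustrative sketch, not actual btrfs code: the standard forward scan over
 * every item of a tree, advancing from leaf to leaf with btrfs_next_leaf().
 * example_scan_tree() is invented; the scan starts at the smallest possible
 * key and takes no transaction (read-only, cow = 0).
 */
static int __maybe_unused example_scan_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* process the item at (leaf, path->slots[0]) here */

		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}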
5642 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5643 * searching until it gets past min_objectid or finds an item of 'type'
5645 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5647 int btrfs_previous_item(struct btrfs_root *root,
5648 struct btrfs_path *path, u64 min_objectid,
5649 int type)
5651 struct btrfs_key found_key;
5652 struct extent_buffer *leaf;
5653 u32 nritems;
5654 int ret;
5656 while (1) {
5657 if (path->slots[0] == 0) {
5658 btrfs_set_path_blocking(path);
5659 ret = btrfs_prev_leaf(root, path);
5660 if (ret != 0)
5661 return ret;
5662 } else {
5663 path->slots[0]--;
5665 leaf = path->nodes[0];
5666 nritems = btrfs_header_nritems(leaf);
5667 if (nritems == 0)
5668 return 1;
5669 if (path->slots[0] == nritems)
5670 path->slots[0]--;
5672 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5673 if (found_key.objectid < min_objectid)
5674 break;
5675 if (found_key.type == type)
5676 return 0;
5677 if (found_key.objectid == min_objectid &&
5678 found_key.type < type)
5679 break;
5681 return 1;
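/*
 * Illustrative sketch, not actual btrfs code: position a path on some key
 * and walk backwards until an item of the wanted type is found, which is
 * the usual way btrfs_previous_item() is used.  example_find_prev_of_type()
 * and its parameters are invented.
 */
static int __maybe_unused example_find_prev_of_type(struct btrfs_root *root,
						    struct btrfs_key *start,
						    u64 min_objectid, int type,
						    struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret < 0)
		goto out;

	/* step backwards from the search position */
	ret = btrfs_previous_item(root, path, min_objectid, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;
}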