/* fs/btrfs/ctree.c */
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
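
/*
 * Typical usage (illustrative sketch only, not part of this file; assumes
 * "root", "key" and "ret" are set up by the caller):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	... examine path->nodes[0] / path->slots[0] ...
 *	btrfs_free_path(path);
 *
 * btrfs_free_path() drops locks and buffer references via
 * btrfs_release_path() before returning the path to the slab cache.
 */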
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
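
/*
 * Illustrative sketch (not part of this file): a lockless peek at the
 * current root pins the buffer but not its role as root, so eb may stop
 * being the root at any moment:
 *
 *	struct extent_buffer *eb = btrfs_root_node(root);
 *	int level = btrfs_header_level(eb);
 *	free_extent_buffer(eb);		(drop the reference taken above)
 */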
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&root->fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &root->fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set.  So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
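
/*
 * Typical pairing (illustrative sketch only; "fs_info" assumed in scope and
 * SEQ_LIST_INIT assumed available from ctree.h).  A caller that wants a
 * consistent historical view grabs a sequence number, performs its
 * time-travel reads against that seq, then drops the blocker so older log
 * entries can be reclaimed:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use elem.seq as time_seq, e.g. via btrfs_search_old_slot() ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */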
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
/*
 * Determines if logging can be omitted.  Returns 1 if it can.  Otherwise, it
 * returns zero with the tree_mod_log_lock acquired.  The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
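
/*
 * Example of how the two lookups differ (illustrative only): if block B has
 * logged operations with seq 10, 12 and 17 and min_seq is 11, then
 * tree_mod_log_search_oldest(fs_info, B, 11) returns the seq 12 element
 * while tree_mod_log_search(fs_info, B, 11) returns the seq 17 element;
 * the seq 10 element is ignored by both.
 */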
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}
static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root->fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(root->fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}
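
/*
 * Worked example (illustrative): if eb holds n == 4 pointers and the newest
 * matching log entry is a MOD_LOG_KEY_ADD for slot 3, undoing it simply
 * drops n to 3.  An older MOD_LOG_KEY_REMOVE entry for slot 3 would then be
 * undone by writing the logged key/blockptr/generation back into slot 3 and
 * bumping n to 4 again, i.e. each entry is replayed in reverse.
 */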
/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(root, logical, 0);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(root->fs_info,
				"failed to read tree block %llu from get_old_root", logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(root->fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_test_is_dummy_root(root))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
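
/*
 * Worked example for the search_start hint above (illustrative): the mask
 * ~((u64)(1024 * 1024 * 1024) - 1) rounds buf->start down to a 1GiB
 * boundary, so a block at byte offset 0x60000000 (1.5GiB) yields a hint of
 * 0x40000000 (1GiB), steering the new copy into the same 1GiB-aligned
 * region as the original.
 */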
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
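
/*
 * Example (illustrative): with a 16KiB blocksize, blocks at 0 and 40960 are
 * "close" because 40960 - (0 + 16384) = 24576 < 32768, while blocks at 0
 * and 65536 are not, since 65536 - 16384 = 49152 >= 32768.
 */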
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
1598 * same as comp_keys only with two btrfs_key's
1600 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1602 if (k1->objectid > k2->objectid)
1603 return 1;
1604 if (k1->objectid < k2->objectid)
1605 return -1;
1606 if (k1->type > k2->type)
1607 return 1;
1608 if (k1->type < k2->type)
1609 return -1;
1610 if (k1->offset > k2->offset)
1611 return 1;
1612 if (k1->offset < k2->offset)
1613 return -1;
1614 return 0;
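
/*
 * Example (illustrative): keys sort by objectid, then type, then offset,
 * so (256, BTRFS_DIR_ITEM_KEY, 12345) sorts before
 * (256, BTRFS_EXTENT_DATA_KEY, 0) because the types differ, regardless of
 * the offsets.
 */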
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = root->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root->fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr, gen);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
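
/*
 * Usage sketch (illustrative; "eb" is a node or leaf already read in, "key"
 * is a cpu-order btrfs_key):
 *
 *	int slot;
 *	int ret = btrfs_bin_search(eb, &key, btrfs_header_level(eb), &slot);
 *
 * ret == 0 means the key sits exactly at "slot"; ret == 1 means it was not
 * found and "slot" is the insertion point (possibly equal to nritems).
 */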
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot));
	if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
		if (!IS_ERR(eb))
			free_extent_buffer(eb);
		eb = NULL;
	}

	return eb;
}
1882 * node level balancing, used to make sure nodes are in proper order for
1883 * item deletion. We balance from the top down, so we have to make sure
1884 * that a deletion won't leave an node completely empty later on.
1886 static noinline int balance_level(struct btrfs_trans_handle *trans,
1887 struct btrfs_root *root,
1888 struct btrfs_path *path, int level)
1890 struct extent_buffer *right = NULL;
1891 struct extent_buffer *mid;
1892 struct extent_buffer *left = NULL;
1893 struct extent_buffer *parent = NULL;
1894 int ret = 0;
1895 int wret;
1896 int pslot;
1897 int orig_slot = path->slots[level];
1898 u64 orig_ptr;
1900 if (level == 0)
1901 return 0;
1903 mid = path->nodes[level];
1905 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1906 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1907 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1909 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1911 if (level < BTRFS_MAX_LEVEL - 1) {
1912 parent = path->nodes[level + 1];
1913 pslot = path->slots[level + 1];
1917 * deal with the case where there is only one pointer in the root
1918 * by promoting the node below to a root
1920 if (!parent) {
1921 struct extent_buffer *child;
1923 if (btrfs_header_nritems(mid) != 1)
1924 return 0;
1926 /* promote the child to a root */
1927 child = read_node_slot(root, mid, 0);
1928 if (!child) {
1929 ret = -EROFS;
1930 btrfs_std_error(root->fs_info, ret);
1931 goto enospc;
1934 btrfs_tree_lock(child);
1935 btrfs_set_lock_blocking(child);
1936 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1937 if (ret) {
1938 btrfs_tree_unlock(child);
1939 free_extent_buffer(child);
1940 goto enospc;
1943 tree_mod_log_set_root_pointer(root, child, 1);
1944 rcu_assign_pointer(root->node, child);
1946 add_root_to_dirty_list(root);
1947 btrfs_tree_unlock(child);
1949 path->locks[level] = 0;
1950 path->nodes[level] = NULL;
1951 clean_tree_block(trans, root->fs_info, mid);
1952 btrfs_tree_unlock(mid);
1953 /* once for the path */
1954 free_extent_buffer(mid);
1956 root_sub_used(root, mid->len);
1957 btrfs_free_tree_block(trans, root, mid, 0, 1);
1958 /* once for the root ptr */
1959 free_extent_buffer_stale(mid);
1960 return 0;
1962 if (btrfs_header_nritems(mid) >
1963 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1964 return 0;
1966 left = read_node_slot(root, parent, pslot - 1);
1967 if (left) {
1968 btrfs_tree_lock(left);
1969 btrfs_set_lock_blocking(left);
1970 wret = btrfs_cow_block(trans, root, left,
1971 parent, pslot - 1, &left);
1972 if (wret) {
1973 ret = wret;
1974 goto enospc;
1977 right = read_node_slot(root, parent, pslot + 1);
1978 if (right) {
1979 btrfs_tree_lock(right);
1980 btrfs_set_lock_blocking(right);
1981 wret = btrfs_cow_block(trans, root, right,
1982 parent, pslot + 1, &right);
1983 if (wret) {
1984 ret = wret;
1985 goto enospc;
1989 /* first, try to make some room in the middle buffer */
1990 if (left) {
1991 orig_slot += btrfs_header_nritems(left);
1992 wret = push_node_left(trans, root, left, mid, 1);
1993 if (wret < 0)
1994 ret = wret;
1998 * then try to empty the right most buffer into the middle
2000 if (right) {
2001 wret = push_node_left(trans, root, mid, right, 1);
2002 if (wret < 0 && wret != -ENOSPC)
2003 ret = wret;
2004 if (btrfs_header_nritems(right) == 0) {
2005 clean_tree_block(trans, root->fs_info, right);
2006 btrfs_tree_unlock(right);
2007 del_ptr(root, path, level + 1, pslot + 1);
2008 root_sub_used(root, right->len);
2009 btrfs_free_tree_block(trans, root, right, 0, 1);
2010 free_extent_buffer_stale(right);
2011 right = NULL;
2012 } else {
2013 struct btrfs_disk_key right_key;
2014 btrfs_node_key(right, &right_key, 0);
2015 tree_mod_log_set_node_key(root->fs_info, parent,
2016 pslot + 1, 0);
2017 btrfs_set_node_key(parent, &right_key, pslot + 1);
2018 btrfs_mark_buffer_dirty(parent);
2021 if (btrfs_header_nritems(mid) == 1) {
2023 * we're not allowed to leave a node with one item in the
2024 * tree during a delete. A deletion from lower in the tree
2025 * could try to delete the only pointer in this node.
2026 * So, pull some keys from the left.
2027 * There has to be a left pointer at this point because
2028 * otherwise we would have pulled some pointers from the
2029 * right
2031 if (!left) {
2032 ret = -EROFS;
2033 btrfs_std_error(root->fs_info, ret);
2034 goto enospc;
2036 wret = balance_node_right(trans, root, mid, left);
2037 if (wret < 0) {
2038 ret = wret;
2039 goto enospc;
2041 if (wret == 1) {
2042 wret = push_node_left(trans, root, left, mid, 1);
2043 if (wret < 0)
2044 ret = wret;
2046 BUG_ON(wret == 1);
2048 if (btrfs_header_nritems(mid) == 0) {
2049 clean_tree_block(trans, root->fs_info, mid);
2050 btrfs_tree_unlock(mid);
2051 del_ptr(root, path, level + 1, pslot);
2052 root_sub_used(root, mid->len);
2053 btrfs_free_tree_block(trans, root, mid, 0, 1);
2054 free_extent_buffer_stale(mid);
2055 mid = NULL;
2056 } else {
2057 /* update the parent key to reflect our changes */
2058 struct btrfs_disk_key mid_key;
2059 btrfs_node_key(mid, &mid_key, 0);
2060 tree_mod_log_set_node_key(root->fs_info, parent,
2061 pslot, 0);
2062 btrfs_set_node_key(parent, &mid_key, pslot);
2063 btrfs_mark_buffer_dirty(parent);
2066 /* update the path */
2067 if (left) {
2068 if (btrfs_header_nritems(left) > orig_slot) {
2069 extent_buffer_get(left);
2070 /* left was locked after cow */
2071 path->nodes[level] = left;
2072 path->slots[level + 1] -= 1;
2073 path->slots[level] = orig_slot;
2074 if (mid) {
2075 btrfs_tree_unlock(mid);
2076 free_extent_buffer(mid);
2078 } else {
2079 orig_slot -= btrfs_header_nritems(left);
2080 path->slots[level] = orig_slot;
2083 /* double check we haven't messed things up */
2084 if (orig_ptr !=
2085 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2086 BUG();
2087 enospc:
2088 if (right) {
2089 btrfs_tree_unlock(right);
2090 free_extent_buffer(right);
2092 if (left) {
2093 if (path->nodes[level] != left)
2094 btrfs_tree_unlock(left);
2095 free_extent_buffer(left);
2097 return ret;
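/*
 * Worked example of the thresholds above (illustrative only; the
 * numbers assume a 16K nodesize and the usual on-disk sizes of
 * struct btrfs_header (101 bytes) and struct btrfs_key_ptr (33 bytes),
 * none of which are spelled out in this file):
 *
 *	BTRFS_NODEPTRS_PER_BLOCK = (16384 - 101) / 33 = 493
 *
 * so balance_level bails out early while a node still holds more than
 * 493 / 4 = 123 pointers, and only below that starts pulling from or
 * merging with the siblings.
 */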
2100 /* Node balancing for insertion. Here we only split or push nodes around
2101 * when they are completely full. This is also done top down, so we
2102 * have to be pessimistic.
2104 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2105 struct btrfs_root *root,
2106 struct btrfs_path *path, int level)
2108 struct extent_buffer *right = NULL;
2109 struct extent_buffer *mid;
2110 struct extent_buffer *left = NULL;
2111 struct extent_buffer *parent = NULL;
2112 int ret = 0;
2113 int wret;
2114 int pslot;
2115 int orig_slot = path->slots[level];
2117 if (level == 0)
2118 return 1;
2120 mid = path->nodes[level];
2121 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2123 if (level < BTRFS_MAX_LEVEL - 1) {
2124 parent = path->nodes[level + 1];
2125 pslot = path->slots[level + 1];
2128 if (!parent)
2129 return 1;
2131 left = read_node_slot(root, parent, pslot - 1);
2133 /* first, try to make some room in the middle buffer */
2134 if (left) {
2135 u32 left_nr;
2137 btrfs_tree_lock(left);
2138 btrfs_set_lock_blocking(left);
2140 left_nr = btrfs_header_nritems(left);
2141 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2142 wret = 1;
2143 } else {
2144 ret = btrfs_cow_block(trans, root, left, parent,
2145 pslot - 1, &left);
2146 if (ret)
2147 wret = 1;
2148 else {
2149 wret = push_node_left(trans, root,
2150 left, mid, 0);
2153 if (wret < 0)
2154 ret = wret;
2155 if (wret == 0) {
2156 struct btrfs_disk_key disk_key;
2157 orig_slot += left_nr;
2158 btrfs_node_key(mid, &disk_key, 0);
2159 tree_mod_log_set_node_key(root->fs_info, parent,
2160 pslot, 0);
2161 btrfs_set_node_key(parent, &disk_key, pslot);
2162 btrfs_mark_buffer_dirty(parent);
2163 if (btrfs_header_nritems(left) > orig_slot) {
2164 path->nodes[level] = left;
2165 path->slots[level + 1] -= 1;
2166 path->slots[level] = orig_slot;
2167 btrfs_tree_unlock(mid);
2168 free_extent_buffer(mid);
2169 } else {
2170 orig_slot -=
2171 btrfs_header_nritems(left);
2172 path->slots[level] = orig_slot;
2173 btrfs_tree_unlock(left);
2174 free_extent_buffer(left);
2176 return 0;
2178 btrfs_tree_unlock(left);
2179 free_extent_buffer(left);
2181 right = read_node_slot(root, parent, pslot + 1);
2184 * then try to empty the rightmost buffer into the middle
2186 if (right) {
2187 u32 right_nr;
2189 btrfs_tree_lock(right);
2190 btrfs_set_lock_blocking(right);
2192 right_nr = btrfs_header_nritems(right);
2193 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2194 wret = 1;
2195 } else {
2196 ret = btrfs_cow_block(trans, root, right,
2197 parent, pslot + 1,
2198 &right);
2199 if (ret)
2200 wret = 1;
2201 else {
2202 wret = balance_node_right(trans, root,
2203 right, mid);
2206 if (wret < 0)
2207 ret = wret;
2208 if (wret == 0) {
2209 struct btrfs_disk_key disk_key;
2211 btrfs_node_key(right, &disk_key, 0);
2212 tree_mod_log_set_node_key(root->fs_info, parent,
2213 pslot + 1, 0);
2214 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2215 btrfs_mark_buffer_dirty(parent);
2217 if (btrfs_header_nritems(mid) <= orig_slot) {
2218 path->nodes[level] = right;
2219 path->slots[level + 1] += 1;
2220 path->slots[level] = orig_slot -
2221 btrfs_header_nritems(mid);
2222 btrfs_tree_unlock(mid);
2223 free_extent_buffer(mid);
2224 } else {
2225 btrfs_tree_unlock(right);
2226 free_extent_buffer(right);
2228 return 0;
2230 btrfs_tree_unlock(right);
2231 free_extent_buffer(right);
2233 return 1;
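/*
 * Summary of the contract above (restating the code, not new
 * behavior): a return of 0 means the overflow was pushed into a
 * sibling and the path already points into that sibling; a return of
 * 1 means neither the left nor the right sibling could absorb it, so
 * the caller (split_node below) has to split the node itself.
 */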
2237 * readahead one full node of leaves, finding things that are close
2238 * to the block in 'slot', and triggering ra on them.
2240 static void reada_for_search(struct btrfs_root *root,
2241 struct btrfs_path *path,
2242 int level, int slot, u64 objectid)
2244 struct extent_buffer *node;
2245 struct btrfs_disk_key disk_key;
2246 u32 nritems;
2247 u64 search;
2248 u64 target;
2249 u64 nread = 0;
2250 u64 gen;
2251 int direction = path->reada;
2252 struct extent_buffer *eb;
2253 u32 nr;
2254 u32 blocksize;
2255 u32 nscan = 0;
2257 if (level != 1)
2258 return;
2260 if (!path->nodes[level])
2261 return;
2263 node = path->nodes[level];
2265 search = btrfs_node_blockptr(node, slot);
2266 blocksize = root->nodesize;
2267 eb = btrfs_find_tree_block(root->fs_info, search);
2268 if (eb) {
2269 free_extent_buffer(eb);
2270 return;
2273 target = search;
2275 nritems = btrfs_header_nritems(node);
2276 nr = slot;
2278 while (1) {
2279 if (direction < 0) {
2280 if (nr == 0)
2281 break;
2282 nr--;
2283 } else if (direction > 0) {
2284 nr++;
2285 if (nr >= nritems)
2286 break;
2288 if (path->reada < 0 && objectid) {
2289 btrfs_node_key(node, &disk_key, nr);
2290 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2291 break;
2293 search = btrfs_node_blockptr(node, nr);
2294 if ((search <= target && target - search <= 65536) ||
2295 (search > target && search - target <= 65536)) {
2296 gen = btrfs_node_ptr_generation(node, nr);
2297 readahead_tree_block(root, search);
2298 nread += blocksize;
2300 nscan++;
2301 if (nread > 65536 || nscan > 32)
2302 break;
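/*
 * Sketch of the window logic above (illustrative numbers): with the
 * wanted block at byte offset 'target', a sibling slot is read ahead
 * only if its blockptr lies within 64K on either side of target, and
 * the scan stops after 64K of readahead or 32 inspected slots, so at
 * most roughly sixteen 4K nodes (or four 16K nodes) are speculatively
 * pulled in around the block we actually need.
 */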
2306 static noinline void reada_for_balance(struct btrfs_root *root,
2307 struct btrfs_path *path, int level)
2309 int slot;
2310 int nritems;
2311 struct extent_buffer *parent;
2312 struct extent_buffer *eb;
2313 u64 gen;
2314 u64 block1 = 0;
2315 u64 block2 = 0;
2317 parent = path->nodes[level + 1];
2318 if (!parent)
2319 return;
2321 nritems = btrfs_header_nritems(parent);
2322 slot = path->slots[level + 1];
2324 if (slot > 0) {
2325 block1 = btrfs_node_blockptr(parent, slot - 1);
2326 gen = btrfs_node_ptr_generation(parent, slot - 1);
2327 eb = btrfs_find_tree_block(root->fs_info, block1);
2329 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2330 * don't want to return -EAGAIN here. That will loop
2331 * forever
2333 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2334 block1 = 0;
2335 free_extent_buffer(eb);
2337 if (slot + 1 < nritems) {
2338 block2 = btrfs_node_blockptr(parent, slot + 1);
2339 gen = btrfs_node_ptr_generation(parent, slot + 1);
2340 eb = btrfs_find_tree_block(root->fs_info, block2);
2341 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2342 block2 = 0;
2343 free_extent_buffer(eb);
2346 if (block1)
2347 readahead_tree_block(root, block1);
2348 if (block2)
2349 readahead_tree_block(root, block2);
2354 * when we walk down the tree, it is usually safe to unlock the higher layers
2355 * in the tree. The exceptions are when our path goes through slot 0, because
2356 * operations on the tree might require changing key pointers higher up in the
2357 * tree.
2359 * callers might also have set path->keep_locks, which tells this code to keep
2360 * the lock if the path points to the last slot in the block. This is part of
2361 * walking through the tree, and selecting the next slot in the higher block.
2363 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2364 * if lowest_unlock is 1, level 0 won't be unlocked
2366 static noinline void unlock_up(struct btrfs_path *path, int level,
2367 int lowest_unlock, int min_write_lock_level,
2368 int *write_lock_level)
2370 int i;
2371 int skip_level = level;
2372 int no_skips = 0;
2373 struct extent_buffer *t;
2375 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2376 if (!path->nodes[i])
2377 break;
2378 if (!path->locks[i])
2379 break;
2380 if (!no_skips && path->slots[i] == 0) {
2381 skip_level = i + 1;
2382 continue;
2384 if (!no_skips && path->keep_locks) {
2385 u32 nritems;
2386 t = path->nodes[i];
2387 nritems = btrfs_header_nritems(t);
2388 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2389 skip_level = i + 1;
2390 continue;
2393 if (skip_level < i && i >= lowest_unlock)
2394 no_skips = 1;
2396 t = path->nodes[i];
2397 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2398 btrfs_tree_unlock_rw(t, path->locks[i]);
2399 path->locks[i] = 0;
2400 if (write_lock_level &&
2401 i > min_write_lock_level &&
2402 i <= *write_lock_level) {
2403 *write_lock_level = i - 1;
2410 * This releases any locks held in the path starting at level and
2411 * going all the way up to the root.
2413 * btrfs_search_slot will keep the lock held on higher nodes in a few
2414 * corner cases, such as COW of the block at slot zero in the node. This
2415 * ignores those rules, and it should only be called when there are no
2416 * more updates to be done higher up in the tree.
2418 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2420 int i;
2422 if (path->keep_locks)
2423 return;
2425 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2426 if (!path->nodes[i])
2427 continue;
2428 if (!path->locks[i])
2429 continue;
2430 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2431 path->locks[i] = 0;
2436 * helper function for btrfs_search_slot. The goal is to find a block
2437 * in cache without setting the path to blocking. If we find the block
2438 * we return zero and the path is unchanged.
2440 * If we can't find the block, we set the path blocking and do some
2441 * reada. -EAGAIN is returned and the search must be repeated.
2443 static int
2444 read_block_for_search(struct btrfs_trans_handle *trans,
2445 struct btrfs_root *root, struct btrfs_path *p,
2446 struct extent_buffer **eb_ret, int level, int slot,
2447 struct btrfs_key *key, u64 time_seq)
2449 u64 blocknr;
2450 u64 gen;
2451 struct extent_buffer *b = *eb_ret;
2452 struct extent_buffer *tmp;
2453 int ret;
2455 blocknr = btrfs_node_blockptr(b, slot);
2456 gen = btrfs_node_ptr_generation(b, slot);
2458 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2459 if (tmp) {
2460 /* first we do an atomic uptodate check */
2461 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2462 *eb_ret = tmp;
2463 return 0;
2466 /* the pages were up to date, but we failed
2467 * the generation number check. Do a full
2468 * read for the generation number that is correct.
2469 * We must do this without dropping locks so
2470 * we can trust our generation number
2472 btrfs_set_path_blocking(p);
2474 /* now we're allowed to do a blocking uptodate check */
2475 ret = btrfs_read_buffer(tmp, gen);
2476 if (!ret) {
2477 *eb_ret = tmp;
2478 return 0;
2480 free_extent_buffer(tmp);
2481 btrfs_release_path(p);
2482 return -EIO;
2486 * reduce lock contention at high levels
2487 * of the btree by dropping locks before
2488 * we read. Don't release the lock on the current
2489 * level because we need to walk this node to figure
2490 * out which blocks to read.
2492 btrfs_unlock_up_safe(p, level + 1);
2493 btrfs_set_path_blocking(p);
2495 free_extent_buffer(tmp);
2496 if (p->reada)
2497 reada_for_search(root, p, level, slot, key->objectid);
2499 btrfs_release_path(p);
2501 ret = -EAGAIN;
2502 tmp = read_tree_block(root, blocknr, 0);
2503 if (!IS_ERR(tmp)) {
2505 * If the read above didn't mark this buffer up to date,
2506 * it will never end up being up to date. Set ret to EIO now
2507 * and give up so that our caller doesn't loop forever
2508 * on our EAGAINs.
2510 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2511 ret = -EIO;
2512 free_extent_buffer(tmp);
2514 return ret;
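/*
 * The -EAGAIN contract above obliges callers to restart from the root,
 * because the path has already been released. This is exactly how
 * btrfs_search_slot() and btrfs_search_old_slot() below consume it:
 *
 *	err = read_block_for_search(trans, root, p, &b, level,
 *				    slot, key, 0);
 *	if (err == -EAGAIN)
 *		goto again;
 *	if (err) {
 *		ret = err;
 *		goto done;
 *	}
 */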
2518 * helper function for btrfs_search_slot. This does all of the checks
2519 * for node-level blocks and does any balancing required based on
2520 * the ins_len.
2522 * If no extra work was required, zero is returned. If we had to
2523 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2524 * start over
2526 static int
2527 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2528 struct btrfs_root *root, struct btrfs_path *p,
2529 struct extent_buffer *b, int level, int ins_len,
2530 int *write_lock_level)
2532 int ret;
2533 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2534 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2535 int sret;
2537 if (*write_lock_level < level + 1) {
2538 *write_lock_level = level + 1;
2539 btrfs_release_path(p);
2540 goto again;
2543 btrfs_set_path_blocking(p);
2544 reada_for_balance(root, p, level);
2545 sret = split_node(trans, root, p, level);
2546 btrfs_clear_path_blocking(p, NULL, 0);
2548 BUG_ON(sret > 0);
2549 if (sret) {
2550 ret = sret;
2551 goto done;
2553 b = p->nodes[level];
2554 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2555 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2556 int sret;
2558 if (*write_lock_level < level + 1) {
2559 *write_lock_level = level + 1;
2560 btrfs_release_path(p);
2561 goto again;
2564 btrfs_set_path_blocking(p);
2565 reada_for_balance(root, p, level);
2566 sret = balance_level(trans, root, p, level);
2567 btrfs_clear_path_blocking(p, NULL, 0);
2569 if (sret) {
2570 ret = sret;
2571 goto done;
2573 b = p->nodes[level];
2574 if (!b) {
2575 btrfs_release_path(p);
2576 goto again;
2578 BUG_ON(btrfs_header_nritems(b) == 1);
2580 return 0;
2582 again:
2583 ret = -EAGAIN;
2584 done:
2585 return ret;
2588 static void key_search_validate(struct extent_buffer *b,
2589 struct btrfs_key *key,
2590 int level)
2592 #ifdef CONFIG_BTRFS_ASSERT
2593 struct btrfs_disk_key disk_key;
2595 btrfs_cpu_key_to_disk(&disk_key, key);
2597 if (level == 0)
2598 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2599 offsetof(struct btrfs_leaf, items[0].key),
2600 sizeof(disk_key)));
2601 else
2602 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2603 offsetof(struct btrfs_node, ptrs[0].key),
2604 sizeof(disk_key)));
2605 #endif
2608 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2609 int level, int *prev_cmp, int *slot)
2611 if (*prev_cmp != 0) {
2612 *prev_cmp = bin_search(b, key, level, slot);
2613 return *prev_cmp;
2616 key_search_validate(b, key, level);
2617 *slot = 0;
2619 return 0;
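/*
 * Why the *prev_cmp shortcut above is safe: once bin_search() has
 * reported an exact match (0) at some level, the child we descend into
 * must begin with that very key, since a node's key for a slot equals
 * the first key of the block it points to. So every lower level can
 * take slot 0 without another binary search; key_search_validate()
 * asserts this invariant when CONFIG_BTRFS_ASSERT is enabled.
 */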
2622 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2623 u64 iobjectid, u64 ioff, u8 key_type,
2624 struct btrfs_key *found_key)
2626 int ret;
2627 struct btrfs_key key;
2628 struct extent_buffer *eb;
2630 ASSERT(path);
2631 ASSERT(found_key);
2633 key.type = key_type;
2634 key.objectid = iobjectid;
2635 key.offset = ioff;
2637 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2638 if (ret < 0)
2639 return ret;
2641 eb = path->nodes[0];
2642 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2643 ret = btrfs_next_leaf(fs_root, path);
2644 if (ret)
2645 return ret;
2646 eb = path->nodes[0];
2649 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2650 if (found_key->type != key.type ||
2651 found_key->objectid != key.objectid)
2652 return 1;
2654 return 0;
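/*
 * Example use (illustrative sketch; 'ino' is an assumed inode number,
 * not something defined in this file):
 *
 *	struct btrfs_key found_key;
 *	int ret;
 *
 *	ret = btrfs_find_item(fs_root, path, ino, 0,
 *			      BTRFS_INODE_REF_KEY, &found_key);
 *
 * ret == 0 means found_key now describes a matching item in
 * path->nodes[0]; ret == 1 means no item with that objectid and type
 * exists; ret < 0 is a search error.
 */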
2658 * look for key in the tree. path is filled in with nodes along the way
2659 * if key is found, we return zero and you can find the item in the leaf
2660 * level of the path (level 0)
2662 * If the key isn't found, the path points to the slot where it should
2663 * be inserted, and 1 is returned. If there are other errors during the
2664 * search a negative error number is returned.
2666 * if ins_len > 0, nodes and leaves will be split as we walk down the
2667 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2668 * possible)
2670 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2671 *root, struct btrfs_key *key, struct btrfs_path *p, int
2672 ins_len, int cow)
2674 struct extent_buffer *b;
2675 int slot;
2676 int ret;
2677 int err;
2678 int level;
2679 int lowest_unlock = 1;
2680 int root_lock;
2681 /* everything at write_lock_level or lower must be write locked */
2682 int write_lock_level = 0;
2683 u8 lowest_level = 0;
2684 int min_write_lock_level;
2685 int prev_cmp;
2687 lowest_level = p->lowest_level;
2688 WARN_ON(lowest_level && ins_len > 0);
2689 WARN_ON(p->nodes[0] != NULL);
2690 BUG_ON(!cow && ins_len);
2692 if (ins_len < 0) {
2693 lowest_unlock = 2;
2695 /* when we are removing items, we might have to go up to level
2696 * two as we update tree pointers. Make sure we keep write
2697 * locks on those levels as well
2699 write_lock_level = 2;
2700 } else if (ins_len > 0) {
2702 * for inserting items, make sure we have a write lock on
2703 * level 1 so we can update keys
2705 write_lock_level = 1;
2708 if (!cow)
2709 write_lock_level = -1;
2711 if (cow && (p->keep_locks || p->lowest_level))
2712 write_lock_level = BTRFS_MAX_LEVEL;
2714 min_write_lock_level = write_lock_level;
2716 again:
2717 prev_cmp = -1;
2719 * we try very hard to do read locks on the root
2721 root_lock = BTRFS_READ_LOCK;
2722 level = 0;
2723 if (p->search_commit_root) {
2725 * the commit roots are read only
2726 * so we always do read locks
2728 if (p->need_commit_sem)
2729 down_read(&root->fs_info->commit_root_sem);
2730 b = root->commit_root;
2731 extent_buffer_get(b);
2732 level = btrfs_header_level(b);
2733 if (p->need_commit_sem)
2734 up_read(&root->fs_info->commit_root_sem);
2735 if (!p->skip_locking)
2736 btrfs_tree_read_lock(b);
2737 } else {
2738 if (p->skip_locking) {
2739 b = btrfs_root_node(root);
2740 level = btrfs_header_level(b);
2741 } else {
2742 /* we don't know the level of the root node
2743 * until we actually have it read locked
2745 b = btrfs_read_lock_root_node(root);
2746 level = btrfs_header_level(b);
2747 if (level <= write_lock_level) {
2748 /* whoops, must trade for write lock */
2749 btrfs_tree_read_unlock(b);
2750 free_extent_buffer(b);
2751 b = btrfs_lock_root_node(root);
2752 root_lock = BTRFS_WRITE_LOCK;
2754 /* the level might have changed, check again */
2755 level = btrfs_header_level(b);
2759 p->nodes[level] = b;
2760 if (!p->skip_locking)
2761 p->locks[level] = root_lock;
2763 while (b) {
2764 level = btrfs_header_level(b);
2767 * setup the path here so we can release it under lock
2768 * contention with the cow code
2770 if (cow) {
2772 * if we don't really need to cow this block
2773 * then we don't want to set the path blocking,
2774 * so we test it here
2776 if (!should_cow_block(trans, root, b))
2777 goto cow_done;
2780 * must have write locks on this node and the
2781 * parent
2783 if (level > write_lock_level ||
2784 (level + 1 > write_lock_level &&
2785 level + 1 < BTRFS_MAX_LEVEL &&
2786 p->nodes[level + 1])) {
2787 write_lock_level = level + 1;
2788 btrfs_release_path(p);
2789 goto again;
2792 btrfs_set_path_blocking(p);
2793 err = btrfs_cow_block(trans, root, b,
2794 p->nodes[level + 1],
2795 p->slots[level + 1], &b);
2796 if (err) {
2797 ret = err;
2798 goto done;
2801 cow_done:
2802 p->nodes[level] = b;
2803 btrfs_clear_path_blocking(p, NULL, 0);
2806 * we have a lock on b and as long as we aren't changing
2807 * the tree, there is no way for the items in b to change.
2808 * It is safe to drop the lock on our parent before we
2809 * go through the expensive btree search on b.
2811 * If we're inserting or deleting (ins_len != 0), then we might
2812 * be changing slot zero, which may require changing the parent.
2813 * So, we can't drop the lock until after we know which slot
2814 * we're operating on.
2816 if (!ins_len && !p->keep_locks) {
2817 int u = level + 1;
2819 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2820 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2821 p->locks[u] = 0;
2825 ret = key_search(b, key, level, &prev_cmp, &slot);
2827 if (level != 0) {
2828 int dec = 0;
2829 if (ret && slot > 0) {
2830 dec = 1;
2831 slot -= 1;
2833 p->slots[level] = slot;
2834 err = setup_nodes_for_search(trans, root, p, b, level,
2835 ins_len, &write_lock_level);
2836 if (err == -EAGAIN)
2837 goto again;
2838 if (err) {
2839 ret = err;
2840 goto done;
2842 b = p->nodes[level];
2843 slot = p->slots[level];
2846 * slot 0 is special, if we change the key
2847 * we have to update the parent pointer
2848 * which means we must have a write lock
2849 * on the parent
2851 if (slot == 0 && ins_len &&
2852 write_lock_level < level + 1) {
2853 write_lock_level = level + 1;
2854 btrfs_release_path(p);
2855 goto again;
2858 unlock_up(p, level, lowest_unlock,
2859 min_write_lock_level, &write_lock_level);
2861 if (level == lowest_level) {
2862 if (dec)
2863 p->slots[level]++;
2864 goto done;
2867 err = read_block_for_search(trans, root, p,
2868 &b, level, slot, key, 0);
2869 if (err == -EAGAIN)
2870 goto again;
2871 if (err) {
2872 ret = err;
2873 goto done;
2876 if (!p->skip_locking) {
2877 level = btrfs_header_level(b);
2878 if (level <= write_lock_level) {
2879 err = btrfs_try_tree_write_lock(b);
2880 if (!err) {
2881 btrfs_set_path_blocking(p);
2882 btrfs_tree_lock(b);
2883 btrfs_clear_path_blocking(p, b,
2884 BTRFS_WRITE_LOCK);
2886 p->locks[level] = BTRFS_WRITE_LOCK;
2887 } else {
2888 err = btrfs_tree_read_lock_atomic(b);
2889 if (!err) {
2890 btrfs_set_path_blocking(p);
2891 btrfs_tree_read_lock(b);
2892 btrfs_clear_path_blocking(p, b,
2893 BTRFS_READ_LOCK);
2895 p->locks[level] = BTRFS_READ_LOCK;
2897 p->nodes[level] = b;
2899 } else {
2900 p->slots[level] = slot;
2901 if (ins_len > 0 &&
2902 btrfs_leaf_free_space(root, b) < ins_len) {
2903 if (write_lock_level < 1) {
2904 write_lock_level = 1;
2905 btrfs_release_path(p);
2906 goto again;
2909 btrfs_set_path_blocking(p);
2910 err = split_leaf(trans, root, key,
2911 p, ins_len, ret == 0);
2912 btrfs_clear_path_blocking(p, NULL, 0);
2914 BUG_ON(err > 0);
2915 if (err) {
2916 ret = err;
2917 goto done;
2920 if (!p->search_for_split)
2921 unlock_up(p, level, lowest_unlock,
2922 min_write_lock_level, &write_lock_level);
2923 goto done;
2926 ret = 1;
2927 done:
2929 * we don't really know what they plan on doing with the path
2930 * from here on, so for now just mark it as blocking
2932 if (!p->leave_spinning)
2933 btrfs_set_path_blocking(p);
2934 if (ret < 0 && !p->skip_release_on_error)
2935 btrfs_release_path(p);
2936 return ret;
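/*
 * Example of a plain read-only lookup with btrfs_search_slot()
 * (illustrative sketch; 'ino' is an assumed inode number):
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key,
 *				      path->slots[0]);
 *	btrfs_free_path(path);
 *
 * Writers instead pass a running transaction handle, cow == 1 and an
 * ins_len matching the space they need (negative for deletions).
 */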
2940 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2941 * current state of the tree together with the operations recorded in the tree
2942 * modification log to search for the key in a previous version of this tree, as
2943 * denoted by the time_seq parameter.
2945 * Naturally, there is no support for insert, delete or cow operations.
2947 * The resulting path and return value will be set up as if we called
2948 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2950 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2951 struct btrfs_path *p, u64 time_seq)
2953 struct extent_buffer *b;
2954 int slot;
2955 int ret;
2956 int err;
2957 int level;
2958 int lowest_unlock = 1;
2959 u8 lowest_level = 0;
2960 int prev_cmp = -1;
2962 lowest_level = p->lowest_level;
2963 WARN_ON(p->nodes[0] != NULL);
2965 if (p->search_commit_root) {
2966 BUG_ON(time_seq);
2967 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2970 again:
2971 b = get_old_root(root, time_seq);
2972 level = btrfs_header_level(b);
2973 p->locks[level] = BTRFS_READ_LOCK;
2975 while (b) {
2976 level = btrfs_header_level(b);
2977 p->nodes[level] = b;
2978 btrfs_clear_path_blocking(p, NULL, 0);
2981 * we have a lock on b and as long as we aren't changing
2982 * the tree, there is no way for the items in b to change.
2983 * It is safe to drop the lock on our parent before we
2984 * go through the expensive btree search on b.
2986 btrfs_unlock_up_safe(p, level + 1);
2989 * Since we can unwind eb's we want to do a real search every
2990 * time.
2992 prev_cmp = -1;
2993 ret = key_search(b, key, level, &prev_cmp, &slot);
2995 if (level != 0) {
2996 int dec = 0;
2997 if (ret && slot > 0) {
2998 dec = 1;
2999 slot -= 1;
3001 p->slots[level] = slot;
3002 unlock_up(p, level, lowest_unlock, 0, NULL);
3004 if (level == lowest_level) {
3005 if (dec)
3006 p->slots[level]++;
3007 goto done;
3010 err = read_block_for_search(NULL, root, p, &b, level,
3011 slot, key, time_seq);
3012 if (err == -EAGAIN)
3013 goto again;
3014 if (err) {
3015 ret = err;
3016 goto done;
3019 level = btrfs_header_level(b);
3020 err = btrfs_tree_read_lock_atomic(b);
3021 if (!err) {
3022 btrfs_set_path_blocking(p);
3023 btrfs_tree_read_lock(b);
3024 btrfs_clear_path_blocking(p, b,
3025 BTRFS_READ_LOCK);
3027 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3028 if (!b) {
3029 ret = -ENOMEM;
3030 goto done;
3032 p->locks[level] = BTRFS_READ_LOCK;
3033 p->nodes[level] = b;
3034 } else {
3035 p->slots[level] = slot;
3036 unlock_up(p, level, lowest_unlock, 0, NULL);
3037 goto done;
3040 ret = 1;
3041 done:
3042 if (!p->leave_spinning)
3043 btrfs_set_path_blocking(p);
3044 if (ret < 0)
3045 btrfs_release_path(p);
3047 return ret;
3051 * helper to use instead of search slot if no exact match is needed but
3052 * instead the next or previous item should be returned.
3053 * When find_higher is true, the next higher item is returned, the next lower
3054 * otherwise.
3055 * When return_any and find_higher are both true, and no higher item is found,
3056 * return the next lower instead.
3057 * When return_any is true and find_higher is false, and no lower item is found,
3058 * return the next higher instead.
3059 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3060 * < 0 on error
3062 int btrfs_search_slot_for_read(struct btrfs_root *root,
3063 struct btrfs_key *key, struct btrfs_path *p,
3064 int find_higher, int return_any)
3066 int ret;
3067 struct extent_buffer *leaf;
3069 again:
3070 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3071 if (ret <= 0)
3072 return ret;
3074 * a return value of 1 means the path is at the position where the
3075 * item should be inserted. Normally this is the next bigger item,
3076 * but in case the previous item is the last in a leaf, path points
3077 * to the first free slot in the previous leaf, i.e. at an invalid
3078 * item.
3080 leaf = p->nodes[0];
3082 if (find_higher) {
3083 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3084 ret = btrfs_next_leaf(root, p);
3085 if (ret <= 0)
3086 return ret;
3087 if (!return_any)
3088 return 1;
3090 * no higher item found, return the next
3091 * lower instead
3093 return_any = 0;
3094 find_higher = 0;
3095 btrfs_release_path(p);
3096 goto again;
3098 } else {
3099 if (p->slots[0] == 0) {
3100 ret = btrfs_prev_leaf(root, p);
3101 if (ret < 0)
3102 return ret;
3103 if (!ret) {
3104 leaf = p->nodes[0];
3105 if (p->slots[0] == btrfs_header_nritems(leaf))
3106 p->slots[0]--;
3107 return 0;
3109 if (!return_any)
3110 return 1;
3112 * no lower item found, return the next
3113 * higher instead
3115 return_any = 0;
3116 find_higher = 1;
3117 btrfs_release_path(p);
3118 goto again;
3119 } else {
3120 --p->slots[0];
3123 return 0;
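/*
 * Example positioning at the first item at or after a key
 * (illustrative sketch; the key type is arbitrary here):
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
 *
 * ret == 0 leaves path->slots[0] on the next item >= key; with
 * find_higher == 0 it would instead land on the next item <= key.
 */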
3127 * adjust the pointers going up the tree, starting at level
3128 * making sure the right key of each node points to 'key'.
3129 * This is used after shifting pointers to the left, so it stops
3130 * fixing up pointers when a given leaf/node is not in slot 0 of the
3131 * higher levels
3134 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3135 struct btrfs_path *path,
3136 struct btrfs_disk_key *key, int level)
3138 int i;
3139 struct extent_buffer *t;
3141 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3142 int tslot = path->slots[i];
3143 if (!path->nodes[i])
3144 break;
3145 t = path->nodes[i];
3146 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3147 btrfs_set_node_key(t, key, tslot);
3148 btrfs_mark_buffer_dirty(path->nodes[i]);
3149 if (tslot != 0)
3150 break;
3155 * update item key.
3157 * This function isn't completely safe. It's the caller's responsibility
3158 * that the new key won't break the order
3160 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3161 struct btrfs_path *path,
3162 struct btrfs_key *new_key)
3164 struct btrfs_disk_key disk_key;
3165 struct extent_buffer *eb;
3166 int slot;
3168 eb = path->nodes[0];
3169 slot = path->slots[0];
3170 if (slot > 0) {
3171 btrfs_item_key(eb, &disk_key, slot - 1);
3172 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3174 if (slot < btrfs_header_nritems(eb) - 1) {
3175 btrfs_item_key(eb, &disk_key, slot + 1);
3176 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3179 btrfs_cpu_key_to_disk(&disk_key, new_key);
3180 btrfs_set_item_key(eb, &disk_key, slot);
3181 btrfs_mark_buffer_dirty(eb);
3182 if (slot == 0)
3183 fixup_low_keys(fs_info, path, &disk_key, 1);
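/*
 * Example of the ordering the BUG_ONs above enforce (illustrative
 * keys): with neighbors (257 DIR_ITEM 10) and (257 DIR_ITEM 30)
 * around the slot, a new_key of (257 DIR_ITEM 20) is accepted, while
 * (257 DIR_ITEM 10), (257 DIR_ITEM 30) or anything outside that open
 * interval would trip the checks, since item keys in a leaf must stay
 * strictly increasing.
 */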
3187 * try to push data from one node into the next node to the left in the
3188 * tree.
3190 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3191 * error, and > 0 if there was no room in the left hand block.
3193 static int push_node_left(struct btrfs_trans_handle *trans,
3194 struct btrfs_root *root, struct extent_buffer *dst,
3195 struct extent_buffer *src, int empty)
3197 int push_items = 0;
3198 int src_nritems;
3199 int dst_nritems;
3200 int ret = 0;
3202 src_nritems = btrfs_header_nritems(src);
3203 dst_nritems = btrfs_header_nritems(dst);
3204 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3205 WARN_ON(btrfs_header_generation(src) != trans->transid);
3206 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3208 if (!empty && src_nritems <= 8)
3209 return 1;
3211 if (push_items <= 0)
3212 return 1;
3214 if (empty) {
3215 push_items = min(src_nritems, push_items);
3216 if (push_items < src_nritems) {
3217 /* leave at least 8 pointers in the node if
3218 * we aren't going to empty it
3220 if (src_nritems - push_items < 8) {
3221 if (push_items <= 8)
3222 return 1;
3223 push_items -= 8;
3226 } else
3227 push_items = min(src_nritems - 8, push_items);
3229 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3230 push_items);
3231 if (ret) {
3232 btrfs_abort_transaction(trans, root, ret);
3233 return ret;
3235 copy_extent_buffer(dst, src,
3236 btrfs_node_key_ptr_offset(dst_nritems),
3237 btrfs_node_key_ptr_offset(0),
3238 push_items * sizeof(struct btrfs_key_ptr));
3240 if (push_items < src_nritems) {
3242 * don't call tree_mod_log_eb_move here, key removal was already
3243 * fully logged by tree_mod_log_eb_copy above.
3245 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3246 btrfs_node_key_ptr_offset(push_items),
3247 (src_nritems - push_items) *
3248 sizeof(struct btrfs_key_ptr));
3250 btrfs_set_header_nritems(src, src_nritems - push_items);
3251 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3252 btrfs_mark_buffer_dirty(src);
3253 btrfs_mark_buffer_dirty(dst);
3255 return ret;
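/*
 * Worked example of the "leave at least 8 pointers" rule above
 * (illustrative numbers): with empty == 1, src_nritems == 20 and room
 * for 15 pointers in dst, push_items starts at 15; that would strand
 * only 20 - 15 = 5 < 8 pointers in src, so push_items is reduced by 8
 * to 7 and 13 remain. With empty == 0, min(src_nritems - 8, ...)
 * enforces the same floor directly.
 */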
3259 * try to push data from one node into the next node to the right in the
3260 * tree.
3262 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3263 * error, and > 0 if there was no room in the right hand block.
3265 * this will only push up to 1/2 the contents of the left node over
3267 static int balance_node_right(struct btrfs_trans_handle *trans,
3268 struct btrfs_root *root,
3269 struct extent_buffer *dst,
3270 struct extent_buffer *src)
3272 int push_items = 0;
3273 int max_push;
3274 int src_nritems;
3275 int dst_nritems;
3276 int ret = 0;
3278 WARN_ON(btrfs_header_generation(src) != trans->transid);
3279 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3281 src_nritems = btrfs_header_nritems(src);
3282 dst_nritems = btrfs_header_nritems(dst);
3283 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3284 if (push_items <= 0)
3285 return 1;
3287 if (src_nritems < 4)
3288 return 1;
3290 max_push = src_nritems / 2 + 1;
3291 /* don't try to empty the node */
3292 if (max_push >= src_nritems)
3293 return 1;
3295 if (max_push < push_items)
3296 push_items = max_push;
3298 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3299 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3300 btrfs_node_key_ptr_offset(0),
3301 (dst_nritems) *
3302 sizeof(struct btrfs_key_ptr));
3304 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3305 src_nritems - push_items, push_items);
3306 if (ret) {
3307 btrfs_abort_transaction(trans, root, ret);
3308 return ret;
3310 copy_extent_buffer(dst, src,
3311 btrfs_node_key_ptr_offset(0),
3312 btrfs_node_key_ptr_offset(src_nritems - push_items),
3313 push_items * sizeof(struct btrfs_key_ptr));
3315 btrfs_set_header_nritems(src, src_nritems - push_items);
3316 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3318 btrfs_mark_buffer_dirty(src);
3319 btrfs_mark_buffer_dirty(dst);
3321 return ret;
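/*
 * Worked example for the cap above (illustrative numbers): with
 * src_nritems == 9, max_push = 9 / 2 + 1 = 5, so at most five pointers
 * move right and at least four stay behind; sources with fewer than
 * four pointers are never drained at all.
 */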
3325 * helper function to insert a new root level in the tree.
3326 * A new node is allocated, and a single item is inserted to
3327 * point to the existing root
3329 * returns zero on success or < 0 on failure.
3331 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3332 struct btrfs_root *root,
3333 struct btrfs_path *path, int level)
3335 u64 lower_gen;
3336 struct extent_buffer *lower;
3337 struct extent_buffer *c;
3338 struct extent_buffer *old;
3339 struct btrfs_disk_key lower_key;
3341 BUG_ON(path->nodes[level]);
3342 BUG_ON(path->nodes[level-1] != root->node);
3344 lower = path->nodes[level-1];
3345 if (level == 1)
3346 btrfs_item_key(lower, &lower_key, 0);
3347 else
3348 btrfs_node_key(lower, &lower_key, 0);
3350 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3351 &lower_key, level, root->node->start, 0);
3352 if (IS_ERR(c))
3353 return PTR_ERR(c);
3355 root_add_used(root, root->nodesize);
3357 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3358 btrfs_set_header_nritems(c, 1);
3359 btrfs_set_header_level(c, level);
3360 btrfs_set_header_bytenr(c, c->start);
3361 btrfs_set_header_generation(c, trans->transid);
3362 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3363 btrfs_set_header_owner(c, root->root_key.objectid);
3365 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3366 BTRFS_FSID_SIZE);
3368 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3369 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3371 btrfs_set_node_key(c, &lower_key, 0);
3372 btrfs_set_node_blockptr(c, 0, lower->start);
3373 lower_gen = btrfs_header_generation(lower);
3374 WARN_ON(lower_gen != trans->transid);
3376 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3378 btrfs_mark_buffer_dirty(c);
3380 old = root->node;
3381 tree_mod_log_set_root_pointer(root, c, 0);
3382 rcu_assign_pointer(root->node, c);
3384 /* the super has an extra ref to root->node */
3385 free_extent_buffer(old);
3387 add_root_to_dirty_list(root);
3388 extent_buffer_get(c);
3389 path->nodes[level] = c;
3390 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3391 path->slots[level] = 0;
3392 return 0;
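/*
 * Illustrative before/after of insert_new_root() (a sketch, not from
 * the source):
 *
 *	before:	root->node --> [node at level N, full]
 *	after:	root->node --> [node at level N+1, nritems == 1]
 *			           \--> [node at level N, full]
 *
 * The tree grows one level and the old root becomes the sole child of
 * the new root, keyed by its own first key.
 */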
3396 * worker function to insert a single pointer in a node.
3397 * the node should have enough room for the pointer already
3399 * slot and level indicate where you want the key to go, and
3400 * blocknr is the block the key points to.
3402 static void insert_ptr(struct btrfs_trans_handle *trans,
3403 struct btrfs_root *root, struct btrfs_path *path,
3404 struct btrfs_disk_key *key, u64 bytenr,
3405 int slot, int level)
3407 struct extent_buffer *lower;
3408 int nritems;
3409 int ret;
3411 BUG_ON(!path->nodes[level]);
3412 btrfs_assert_tree_locked(path->nodes[level]);
3413 lower = path->nodes[level];
3414 nritems = btrfs_header_nritems(lower);
3415 BUG_ON(slot > nritems);
3416 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3417 if (slot != nritems) {
3418 if (level)
3419 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3420 slot, nritems - slot);
3421 memmove_extent_buffer(lower,
3422 btrfs_node_key_ptr_offset(slot + 1),
3423 btrfs_node_key_ptr_offset(slot),
3424 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3426 if (level) {
3427 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3428 MOD_LOG_KEY_ADD, GFP_NOFS);
3429 BUG_ON(ret < 0);
3431 btrfs_set_node_key(lower, key, slot);
3432 btrfs_set_node_blockptr(lower, slot, bytenr);
3433 WARN_ON(trans->transid == 0);
3434 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3435 btrfs_set_header_nritems(lower, nritems + 1);
3436 btrfs_mark_buffer_dirty(lower);
3440 * split the node at the specified level in path in two.
3441 * The path is corrected to point to the appropriate node after the split
3443 * Before splitting this tries to make some room in the node by pushing
3444 * left and right, if either one works, it returns right away.
3446 * returns 0 on success and < 0 on failure
3448 static noinline int split_node(struct btrfs_trans_handle *trans,
3449 struct btrfs_root *root,
3450 struct btrfs_path *path, int level)
3452 struct extent_buffer *c;
3453 struct extent_buffer *split;
3454 struct btrfs_disk_key disk_key;
3455 int mid;
3456 int ret;
3457 u32 c_nritems;
3459 c = path->nodes[level];
3460 WARN_ON(btrfs_header_generation(c) != trans->transid);
3461 if (c == root->node) {
3463 * trying to split the root, let's make a new one
3465 * tree mod log: We don't log the removal of the old root in
3466 * insert_new_root, because that root buffer will be kept as a
3467 * normal node. We are going to log removal of half of the
3468 * elements below with tree_mod_log_eb_copy. We're holding a
3469 * tree lock on the buffer, which is why we cannot race with
3470 * other tree_mod_log users.
3472 ret = insert_new_root(trans, root, path, level + 1);
3473 if (ret)
3474 return ret;
3475 } else {
3476 ret = push_nodes_for_insert(trans, root, path, level);
3477 c = path->nodes[level];
3478 if (!ret && btrfs_header_nritems(c) <
3479 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3480 return 0;
3481 if (ret < 0)
3482 return ret;
3485 c_nritems = btrfs_header_nritems(c);
3486 mid = (c_nritems + 1) / 2;
3487 btrfs_node_key(c, &disk_key, mid);
3489 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3490 &disk_key, level, c->start, 0);
3491 if (IS_ERR(split))
3492 return PTR_ERR(split);
3494 root_add_used(root, root->nodesize);
3496 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3497 btrfs_set_header_level(split, btrfs_header_level(c));
3498 btrfs_set_header_bytenr(split, split->start);
3499 btrfs_set_header_generation(split, trans->transid);
3500 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3501 btrfs_set_header_owner(split, root->root_key.objectid);
3502 write_extent_buffer(split, root->fs_info->fsid,
3503 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3504 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3505 btrfs_header_chunk_tree_uuid(split),
3506 BTRFS_UUID_SIZE);
3508 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3509 mid, c_nritems - mid);
3510 if (ret) {
3511 btrfs_abort_transaction(trans, root, ret);
3512 return ret;
3514 copy_extent_buffer(split, c,
3515 btrfs_node_key_ptr_offset(0),
3516 btrfs_node_key_ptr_offset(mid),
3517 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3518 btrfs_set_header_nritems(split, c_nritems - mid);
3519 btrfs_set_header_nritems(c, mid);
3520 ret = 0;
3522 btrfs_mark_buffer_dirty(c);
3523 btrfs_mark_buffer_dirty(split);
3525 insert_ptr(trans, root, path, &disk_key, split->start,
3526 path->slots[level + 1] + 1, level + 1);
3528 if (path->slots[level] >= mid) {
3529 path->slots[level] -= mid;
3530 btrfs_tree_unlock(c);
3531 free_extent_buffer(c);
3532 path->nodes[level] = split;
3533 path->slots[level + 1] += 1;
3534 } else {
3535 btrfs_tree_unlock(split);
3536 free_extent_buffer(split);
3538 return ret;
3542 * how many bytes are required to store the items in a leaf. start
3543 * and nr indicate which items in the leaf to check. This totals up the
3544 * space used both by the item structs and the item data
3546 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3548 struct btrfs_item *start_item;
3549 struct btrfs_item *end_item;
3550 struct btrfs_map_token token;
3551 int data_len;
3552 int nritems = btrfs_header_nritems(l);
3553 int end = min(nritems, start + nr) - 1;
3555 if (!nr)
3556 return 0;
3557 btrfs_init_map_token(&token);
3558 start_item = btrfs_item_nr(start);
3559 end_item = btrfs_item_nr(end);
3560 data_len = btrfs_token_item_offset(l, start_item, &token) +
3561 btrfs_token_item_size(l, start_item, &token);
3562 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3563 data_len += sizeof(struct btrfs_item) * nr;
3564 WARN_ON(data_len < 0);
3565 return data_len;
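/*
 * Worked example of the math above (illustrative numbers, assuming
 * the usual 25-byte struct btrfs_item): for leaf_space_used(l, 0, 2)
 * with item 0 at data offset 16083, size 200 and item 1 at offset
 * 15983, size 100:
 *
 *	data_len = (16083 + 200) - 15983 = 300 bytes of item data,
 *	plus 2 * 25 = 50 bytes of item headers, 350 bytes in total.
 */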
3569 * The space between the end of the leaf items and
3570 * the start of the leaf data. IOW, how much room
3571 * the leaf has left for both items and data
3573 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3574 struct extent_buffer *leaf)
3576 int nritems = btrfs_header_nritems(leaf);
3577 int ret;
3578 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3579 if (ret < 0) {
3580 btrfs_crit(root->fs_info,
3581 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3582 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3583 leaf_space_used(leaf, 0, nritems), nritems);
3585 return ret;
3589 * min slot controls the lowest index we're willing to push to the
3590 * right. We'll push up to and including min_slot, but no lower
3592 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3593 struct btrfs_root *root,
3594 struct btrfs_path *path,
3595 int data_size, int empty,
3596 struct extent_buffer *right,
3597 int free_space, u32 left_nritems,
3598 u32 min_slot)
3600 struct extent_buffer *left = path->nodes[0];
3601 struct extent_buffer *upper = path->nodes[1];
3602 struct btrfs_map_token token;
3603 struct btrfs_disk_key disk_key;
3604 int slot;
3605 u32 i;
3606 int push_space = 0;
3607 int push_items = 0;
3608 struct btrfs_item *item;
3609 u32 nr;
3610 u32 right_nritems;
3611 u32 data_end;
3612 u32 this_item_size;
3614 btrfs_init_map_token(&token);
3616 if (empty)
3617 nr = 0;
3618 else
3619 nr = max_t(u32, 1, min_slot);
3621 if (path->slots[0] >= left_nritems)
3622 push_space += data_size;
3624 slot = path->slots[1];
3625 i = left_nritems - 1;
3626 while (i >= nr) {
3627 item = btrfs_item_nr(i);
3629 if (!empty && push_items > 0) {
3630 if (path->slots[0] > i)
3631 break;
3632 if (path->slots[0] == i) {
3633 int space = btrfs_leaf_free_space(root, left);
3634 if (space + push_space * 2 > free_space)
3635 break;
3639 if (path->slots[0] == i)
3640 push_space += data_size;
3642 this_item_size = btrfs_item_size(left, item);
3643 if (this_item_size + sizeof(*item) + push_space > free_space)
3644 break;
3646 push_items++;
3647 push_space += this_item_size + sizeof(*item);
3648 if (i == 0)
3649 break;
3650 i--;
3653 if (push_items == 0)
3654 goto out_unlock;
3656 WARN_ON(!empty && push_items == left_nritems);
3658 /* push left to right */
3659 right_nritems = btrfs_header_nritems(right);
3661 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3662 push_space -= leaf_data_end(root, left);
3664 /* make room in the right data area */
3665 data_end = leaf_data_end(root, right);
3666 memmove_extent_buffer(right,
3667 btrfs_leaf_data(right) + data_end - push_space,
3668 btrfs_leaf_data(right) + data_end,
3669 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3671 /* copy from the left data area */
3672 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3673 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3674 btrfs_leaf_data(left) + leaf_data_end(root, left),
3675 push_space);
3677 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3678 btrfs_item_nr_offset(0),
3679 right_nritems * sizeof(struct btrfs_item));
3681 /* copy the items from left to right */
3682 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3683 btrfs_item_nr_offset(left_nritems - push_items),
3684 push_items * sizeof(struct btrfs_item));
3686 /* update the item pointers */
3687 right_nritems += push_items;
3688 btrfs_set_header_nritems(right, right_nritems);
3689 push_space = BTRFS_LEAF_DATA_SIZE(root);
3690 for (i = 0; i < right_nritems; i++) {
3691 item = btrfs_item_nr(i);
3692 push_space -= btrfs_token_item_size(right, item, &token);
3693 btrfs_set_token_item_offset(right, item, push_space, &token);
3696 left_nritems -= push_items;
3697 btrfs_set_header_nritems(left, left_nritems);
3699 if (left_nritems)
3700 btrfs_mark_buffer_dirty(left);
3701 else
3702 clean_tree_block(trans, root->fs_info, left);
3704 btrfs_mark_buffer_dirty(right);
3706 btrfs_item_key(right, &disk_key, 0);
3707 btrfs_set_node_key(upper, &disk_key, slot + 1);
3708 btrfs_mark_buffer_dirty(upper);
3710 /* then fixup the leaf pointer in the path */
3711 if (path->slots[0] >= left_nritems) {
3712 path->slots[0] -= left_nritems;
3713 if (btrfs_header_nritems(path->nodes[0]) == 0)
3714 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3715 btrfs_tree_unlock(path->nodes[0]);
3716 free_extent_buffer(path->nodes[0]);
3717 path->nodes[0] = right;
3718 path->slots[1] += 1;
3719 } else {
3720 btrfs_tree_unlock(right);
3721 free_extent_buffer(right);
3723 return 0;
3725 out_unlock:
3726 btrfs_tree_unlock(right);
3727 free_extent_buffer(right);
3728 return 1;
3732 * push some data in the path leaf to the right, trying to free up at
3733 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3735 * returns 1 if the push failed because the other node didn't have enough
3736 * room, 0 if everything worked out and < 0 if there were major errors.
3738 * this will push starting from min_slot to the end of the leaf. It won't
3739 * push any slot lower than min_slot
3741 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3742 *root, struct btrfs_path *path,
3743 int min_data_size, int data_size,
3744 int empty, u32 min_slot)
3746 struct extent_buffer *left = path->nodes[0];
3747 struct extent_buffer *right;
3748 struct extent_buffer *upper;
3749 int slot;
3750 int free_space;
3751 u32 left_nritems;
3752 int ret;
3754 if (!path->nodes[1])
3755 return 1;
3757 slot = path->slots[1];
3758 upper = path->nodes[1];
3759 if (slot >= btrfs_header_nritems(upper) - 1)
3760 return 1;
3762 btrfs_assert_tree_locked(path->nodes[1]);
3764 right = read_node_slot(root, upper, slot + 1);
3765 if (right == NULL)
3766 return 1;
3768 btrfs_tree_lock(right);
3769 btrfs_set_lock_blocking(right);
3771 free_space = btrfs_leaf_free_space(root, right);
3772 if (free_space < data_size)
3773 goto out_unlock;
3775 /* cow and double check */
3776 ret = btrfs_cow_block(trans, root, right, upper,
3777 slot + 1, &right);
3778 if (ret)
3779 goto out_unlock;
3781 free_space = btrfs_leaf_free_space(root, right);
3782 if (free_space < data_size)
3783 goto out_unlock;
3785 left_nritems = btrfs_header_nritems(left);
3786 if (left_nritems == 0)
3787 goto out_unlock;
3789 if (path->slots[0] == left_nritems && !empty) {
3790 /* Key greater than all keys in the leaf, right neighbor has
3791 * enough room for it and we're not emptying our leaf to delete
3792 * it, therefore use right neighbor to insert the new item and
3793 * no need to touch/dirty our left leaf. */
3794 btrfs_tree_unlock(left);
3795 free_extent_buffer(left);
3796 path->nodes[0] = right;
3797 path->slots[0] = 0;
3798 path->slots[1]++;
3799 return 0;
3802 return __push_leaf_right(trans, root, path, min_data_size, empty,
3803 right, free_space, left_nritems, min_slot);
3804 out_unlock:
3805 btrfs_tree_unlock(right);
3806 free_extent_buffer(right);
3807 return 1;
3811 * push some data in the path leaf to the left, trying to free up at
3812 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3814 * max_slot can put a limit on how far into the leaf we'll push items. The
3815 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3816 * items
3818 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3819 struct btrfs_root *root,
3820 struct btrfs_path *path, int data_size,
3821 int empty, struct extent_buffer *left,
3822 int free_space, u32 right_nritems,
3823 u32 max_slot)
3825 struct btrfs_disk_key disk_key;
3826 struct extent_buffer *right = path->nodes[0];
3827 int i;
3828 int push_space = 0;
3829 int push_items = 0;
3830 struct btrfs_item *item;
3831 u32 old_left_nritems;
3832 u32 nr;
3833 int ret = 0;
3834 u32 this_item_size;
3835 u32 old_left_item_size;
3836 struct btrfs_map_token token;
3838 btrfs_init_map_token(&token);
3840 if (empty)
3841 nr = min(right_nritems, max_slot);
3842 else
3843 nr = min(right_nritems - 1, max_slot);
3845 for (i = 0; i < nr; i++) {
3846 item = btrfs_item_nr(i);
3848 if (!empty && push_items > 0) {
3849 if (path->slots[0] < i)
3850 break;
3851 if (path->slots[0] == i) {
3852 int space = btrfs_leaf_free_space(root, right);
3853 if (space + push_space * 2 > free_space)
3854 break;
3858 if (path->slots[0] == i)
3859 push_space += data_size;
3861 this_item_size = btrfs_item_size(right, item);
3862 if (this_item_size + sizeof(*item) + push_space > free_space)
3863 break;
3865 push_items++;
3866 push_space += this_item_size + sizeof(*item);
3869 if (push_items == 0) {
3870 ret = 1;
3871 goto out;
3873 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3875 /* push data from right to left */
3876 copy_extent_buffer(left, right,
3877 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3878 btrfs_item_nr_offset(0),
3879 push_items * sizeof(struct btrfs_item));
3881 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3882 btrfs_item_offset_nr(right, push_items - 1);
3884 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3885 leaf_data_end(root, left) - push_space,
3886 btrfs_leaf_data(right) +
3887 btrfs_item_offset_nr(right, push_items - 1),
3888 push_space);
3889 old_left_nritems = btrfs_header_nritems(left);
3890 BUG_ON(old_left_nritems <= 0);
3892 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3893 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3894 u32 ioff;
3896 item = btrfs_item_nr(i);
3898 ioff = btrfs_token_item_offset(left, item, &token);
3899 btrfs_set_token_item_offset(left, item,
3900 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3901 &token);
3903 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3905 /* fixup right node */
3906 if (push_items > right_nritems)
3907 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3908 right_nritems);
3910 if (push_items < right_nritems) {
3911 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3912 leaf_data_end(root, right);
3913 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3914 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3915 btrfs_leaf_data(right) +
3916 leaf_data_end(root, right), push_space);
3918 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3919 btrfs_item_nr_offset(push_items),
3920 (btrfs_header_nritems(right) - push_items) *
3921 sizeof(struct btrfs_item));
3923 right_nritems -= push_items;
3924 btrfs_set_header_nritems(right, right_nritems);
3925 push_space = BTRFS_LEAF_DATA_SIZE(root);
3926 for (i = 0; i < right_nritems; i++) {
3927 item = btrfs_item_nr(i);
3929 push_space = push_space - btrfs_token_item_size(right,
3930 item, &token);
3931 btrfs_set_token_item_offset(right, item, push_space, &token);
3934 btrfs_mark_buffer_dirty(left);
3935 if (right_nritems)
3936 btrfs_mark_buffer_dirty(right);
3937 else
3938 clean_tree_block(trans, root->fs_info, right);
3940 btrfs_item_key(right, &disk_key, 0);
3941 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3943 /* then fixup the leaf pointer in the path */
3944 if (path->slots[0] < push_items) {
3945 path->slots[0] += old_left_nritems;
3946 btrfs_tree_unlock(path->nodes[0]);
3947 free_extent_buffer(path->nodes[0]);
3948 path->nodes[0] = left;
3949 path->slots[1] -= 1;
3950 } else {
3951 btrfs_tree_unlock(left);
3952 free_extent_buffer(left);
3953 path->slots[0] -= push_items;
3955 BUG_ON(path->slots[0] < 0);
3956 return ret;
3957 out:
3958 btrfs_tree_unlock(left);
3959 free_extent_buffer(left);
3960 return ret;
3964 * push some data in the path leaf to the left, trying to free up at
3965 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3967 * max_slot can put a limit on how far into the leaf we'll push items. The
3968 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3969 * items
3971 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3972 *root, struct btrfs_path *path, int min_data_size,
3973 int data_size, int empty, u32 max_slot)
3975 struct extent_buffer *right = path->nodes[0];
3976 struct extent_buffer *left;
3977 int slot;
3978 int free_space;
3979 u32 right_nritems;
3980 int ret = 0;
3982 slot = path->slots[1];
3983 if (slot == 0)
3984 return 1;
3985 if (!path->nodes[1])
3986 return 1;
3988 right_nritems = btrfs_header_nritems(right);
3989 if (right_nritems == 0)
3990 return 1;
3992 btrfs_assert_tree_locked(path->nodes[1]);
3994 left = read_node_slot(root, path->nodes[1], slot - 1);
3995 if (left == NULL)
3996 return 1;
3998 btrfs_tree_lock(left);
3999 btrfs_set_lock_blocking(left);
4001 free_space = btrfs_leaf_free_space(root, left);
4002 if (free_space < data_size) {
4003 ret = 1;
4004 goto out;
4007 /* cow and double check */
4008 ret = btrfs_cow_block(trans, root, left,
4009 path->nodes[1], slot - 1, &left);
4010 if (ret) {
4011 /* we hit -ENOSPC, but it isn't fatal here */
4012 if (ret == -ENOSPC)
4013 ret = 1;
4014 goto out;
4017 free_space = btrfs_leaf_free_space(root, left);
4018 if (free_space < data_size) {
4019 ret = 1;
4020 goto out;
4023 return __push_leaf_left(trans, root, path, min_data_size,
4024 empty, left, free_space, right_nritems,
4025 max_slot);
4026 out:
4027 btrfs_tree_unlock(left);
4028 free_extent_buffer(left);
4029 return ret;
4033 * split the path's leaf in two, making sure there is at least data_size
4034 * available for the resulting leaf level of the path.
4036 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4037 struct btrfs_root *root,
4038 struct btrfs_path *path,
4039 struct extent_buffer *l,
4040 struct extent_buffer *right,
4041 int slot, int mid, int nritems)
4043 int data_copy_size;
4044 int rt_data_off;
4045 int i;
4046 struct btrfs_disk_key disk_key;
4047 struct btrfs_map_token token;
4049 btrfs_init_map_token(&token);
4051 nritems = nritems - mid;
4052 btrfs_set_header_nritems(right, nritems);
4053 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4055 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4056 btrfs_item_nr_offset(mid),
4057 nritems * sizeof(struct btrfs_item));
4059 copy_extent_buffer(right, l,
4060 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4061 data_copy_size, btrfs_leaf_data(l) +
4062 leaf_data_end(root, l), data_copy_size);
4064 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4065 btrfs_item_end_nr(l, mid);
4067 for (i = 0; i < nritems; i++) {
4068 struct btrfs_item *item = btrfs_item_nr(i);
4069 u32 ioff;
4071 ioff = btrfs_token_item_offset(right, item, &token);
4072 btrfs_set_token_item_offset(right, item,
4073 ioff + rt_data_off, &token);
4076 btrfs_set_header_nritems(l, mid);
4077 btrfs_item_key(right, &disk_key, 0);
4078 insert_ptr(trans, root, path, &disk_key, right->start,
4079 path->slots[1] + 1, 1);
4081 btrfs_mark_buffer_dirty(right);
4082 btrfs_mark_buffer_dirty(l);
4083 BUG_ON(path->slots[0] != slot);
4085 if (mid <= slot) {
4086 btrfs_tree_unlock(path->nodes[0]);
4087 free_extent_buffer(path->nodes[0]);
4088 path->nodes[0] = right;
4089 path->slots[0] -= mid;
4090 path->slots[1] += 1;
4091 } else {
4092 btrfs_tree_unlock(right);
4093 free_extent_buffer(right);
4096 BUG_ON(path->slots[0] < 0);
4100 * double splits happen when we need to insert a big item in the middle
4101 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4102 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4103 * A B C
4105 * We avoid this by trying to push the items on either side of our target
4106 * into the adjacent leaves. If all goes well we can avoid the double split
4107 * completely.
4109 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4110 struct btrfs_root *root,
4111 struct btrfs_path *path,
4112 int data_size)
4114 int ret;
4115 int progress = 0;
4116 int slot;
4117 u32 nritems;
4118 int space_needed = data_size;
4120 slot = path->slots[0];
4121 if (slot < btrfs_header_nritems(path->nodes[0]))
4122 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4125 * try to push all the items after our slot into the
4126 * right leaf
4128 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4129 if (ret < 0)
4130 return ret;
4132 if (ret == 0)
4133 progress++;
4135 nritems = btrfs_header_nritems(path->nodes[0]);
4137 * our goal is to get our slot at the start or end of a leaf. If
4138 * we've done so we're done
4140 if (path->slots[0] == 0 || path->slots[0] == nritems)
4141 return 0;
4143 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4144 return 0;
4146 /* try to push all the items before our slot into the left leaf */
4147 slot = path->slots[0];
4148 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4149 if (ret < 0)
4150 return ret;
4152 if (ret == 0)
4153 progress++;
4155 if (progress)
4156 return 0;
4157 return 1;
4161 * split the path's leaf in two, making sure there is at least data_size
4162 * available for the resulting leaf level of the path.
4164 * returns 0 if all went well and < 0 on failure.
4166 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4167 struct btrfs_root *root,
4168 struct btrfs_key *ins_key,
4169 struct btrfs_path *path, int data_size,
4170 int extend)
4172 struct btrfs_disk_key disk_key;
4173 struct extent_buffer *l;
4174 u32 nritems;
4175 int mid;
4176 int slot;
4177 struct extent_buffer *right;
4178 struct btrfs_fs_info *fs_info = root->fs_info;
4179 int ret = 0;
4180 int wret;
4181 int split;
4182 int num_doubles = 0;
4183 int tried_avoid_double = 0;
4185 l = path->nodes[0];
4186 slot = path->slots[0];
4187 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4188 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4189 return -EOVERFLOW;
4191 /* first try to make some room by pushing left and right */
4192 if (data_size && path->nodes[1]) {
4193 int space_needed = data_size;
4195 if (slot < btrfs_header_nritems(l))
4196 space_needed -= btrfs_leaf_free_space(root, l);
4198 wret = push_leaf_right(trans, root, path, space_needed,
4199 space_needed, 0, 0);
4200 if (wret < 0)
4201 return wret;
4202 if (wret) {
4203 wret = push_leaf_left(trans, root, path, space_needed,
4204 space_needed, 0, (u32)-1);
4205 if (wret < 0)
4206 return wret;
4208 l = path->nodes[0];
4210 /* did the pushes work? */
4211 if (btrfs_leaf_free_space(root, l) >= data_size)
4212 return 0;
4215 if (!path->nodes[1]) {
4216 ret = insert_new_root(trans, root, path, 1);
4217 if (ret)
4218 return ret;
4220 again:
4221 split = 1;
4222 l = path->nodes[0];
4223 slot = path->slots[0];
4224 nritems = btrfs_header_nritems(l);
4225 mid = (nritems + 1) / 2;
4227 if (mid <= slot) {
4228 if (nritems == 1 ||
4229 leaf_space_used(l, mid, nritems - mid) + data_size >
4230 BTRFS_LEAF_DATA_SIZE(root)) {
4231 if (slot >= nritems) {
4232 split = 0;
4233 } else {
4234 mid = slot;
4235 if (mid != nritems &&
4236 leaf_space_used(l, mid, nritems - mid) +
4237 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4238 if (data_size && !tried_avoid_double)
4239 goto push_for_double;
4240 split = 2;
4244 } else {
4245 if (leaf_space_used(l, 0, mid) + data_size >
4246 BTRFS_LEAF_DATA_SIZE(root)) {
4247 if (!extend && data_size && slot == 0) {
4248 split = 0;
4249 } else if ((extend || !data_size) && slot == 0) {
4250 mid = 1;
4251 } else {
4252 mid = slot;
4253 if (mid != nritems &&
4254 leaf_space_used(l, mid, nritems - mid) +
4255 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4256 if (data_size && !tried_avoid_double)
4257 goto push_for_double;
4258 split = 2;
4264 if (split == 0)
4265 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4266 else
4267 btrfs_item_key(l, &disk_key, mid);
4269 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4270 &disk_key, 0, l->start, 0);
4271 if (IS_ERR(right))
4272 return PTR_ERR(right);
4274 root_add_used(root, root->nodesize);
4276 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4277 btrfs_set_header_bytenr(right, right->start);
4278 btrfs_set_header_generation(right, trans->transid);
4279 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4280 btrfs_set_header_owner(right, root->root_key.objectid);
4281 btrfs_set_header_level(right, 0);
4282 write_extent_buffer(right, fs_info->fsid,
4283 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4285 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4286 btrfs_header_chunk_tree_uuid(right),
4287 BTRFS_UUID_SIZE);
4289 if (split == 0) {
4290 if (mid <= slot) {
4291 btrfs_set_header_nritems(right, 0);
4292 insert_ptr(trans, root, path, &disk_key, right->start,
4293 path->slots[1] + 1, 1);
4294 btrfs_tree_unlock(path->nodes[0]);
4295 free_extent_buffer(path->nodes[0]);
4296 path->nodes[0] = right;
4297 path->slots[0] = 0;
4298 path->slots[1] += 1;
4299 } else {
4300 btrfs_set_header_nritems(right, 0);
4301 insert_ptr(trans, root, path, &disk_key, right->start,
4302 path->slots[1], 1);
4303 btrfs_tree_unlock(path->nodes[0]);
4304 free_extent_buffer(path->nodes[0]);
4305 path->nodes[0] = right;
4306 path->slots[0] = 0;
4307 if (path->slots[1] == 0)
4308 fixup_low_keys(fs_info, path, &disk_key, 1);
4310 btrfs_mark_buffer_dirty(right);
4311 return ret;
4314 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4316 if (split == 2) {
4317 BUG_ON(num_doubles != 0);
4318 num_doubles++;
4319 goto again;
4322 return 0;
4324 push_for_double:
4325 push_for_double_split(trans, root, path, data_size);
4326 tried_avoid_double = 1;
4327 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4328 return 0;
4329 goto again;
4332 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4333 struct btrfs_root *root,
4334 struct btrfs_path *path, int ins_len)
4336 struct btrfs_key key;
4337 struct extent_buffer *leaf;
4338 struct btrfs_file_extent_item *fi;
4339 u64 extent_len = 0;
4340 u32 item_size;
4341 int ret;
4343 leaf = path->nodes[0];
4344 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4346 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4347 key.type != BTRFS_EXTENT_CSUM_KEY);
4349 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4350 return 0;
4352 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4353 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4354 fi = btrfs_item_ptr(leaf, path->slots[0],
4355 struct btrfs_file_extent_item);
4356 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4358 btrfs_release_path(path);
4360 path->keep_locks = 1;
4361 path->search_for_split = 1;
4362 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4363 path->search_for_split = 0;
4364 if (ret > 0)
4365 ret = -EAGAIN;
4366 if (ret < 0)
4367 goto err;
4369 ret = -EAGAIN;
4370 leaf = path->nodes[0];
4371 /* if our item isn't there, return now */
4372 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4373 goto err;
4375 /* the leaf has changed, it now has room. return now */
4376 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4377 goto err;
4379 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4380 fi = btrfs_item_ptr(leaf, path->slots[0],
4381 struct btrfs_file_extent_item);
4382 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4383 goto err;
4386 btrfs_set_path_blocking(path);
4387 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4388 if (ret)
4389 goto err;
4391 path->keep_locks = 0;
4392 btrfs_unlock_up_safe(path, 1);
4393 return 0;
4394 err:
4395 path->keep_locks = 0;
4396 return ret;
4399 static noinline int split_item(struct btrfs_trans_handle *trans,
4400 struct btrfs_root *root,
4401 struct btrfs_path *path,
4402 struct btrfs_key *new_key,
4403 unsigned long split_offset)
4405 struct extent_buffer *leaf;
4406 struct btrfs_item *item;
4407 struct btrfs_item *new_item;
4408 int slot;
4409 char *buf;
4410 u32 nritems;
4411 u32 item_size;
4412 u32 orig_offset;
4413 struct btrfs_disk_key disk_key;
4415 leaf = path->nodes[0];
4416 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4418 btrfs_set_path_blocking(path);
4420 item = btrfs_item_nr(path->slots[0]);
4421 orig_offset = btrfs_item_offset(leaf, item);
4422 item_size = btrfs_item_size(leaf, item);
4424 buf = kmalloc(item_size, GFP_NOFS);
4425 if (!buf)
4426 return -ENOMEM;
4428 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4429 path->slots[0]), item_size);
4431 slot = path->slots[0] + 1;
4432 nritems = btrfs_header_nritems(leaf);
4433 if (slot != nritems) {
4434 /* shift the items */
4435 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4436 btrfs_item_nr_offset(slot),
4437 (nritems - slot) * sizeof(struct btrfs_item));
4440 btrfs_cpu_key_to_disk(&disk_key, new_key);
4441 btrfs_set_item_key(leaf, &disk_key, slot);
4443 new_item = btrfs_item_nr(slot);
4445 btrfs_set_item_offset(leaf, new_item, orig_offset);
4446 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4448 btrfs_set_item_offset(leaf, item,
4449 orig_offset + item_size - split_offset);
4450 btrfs_set_item_size(leaf, item, split_offset);
4452 btrfs_set_header_nritems(leaf, nritems + 1);
4454 /* write the data for the start of the original item */
4455 write_extent_buffer(leaf, buf,
4456 btrfs_item_ptr_offset(leaf, path->slots[0]),
4457 split_offset);
4459 /* write the data for the new item */
4460 write_extent_buffer(leaf, buf + split_offset,
4461 btrfs_item_ptr_offset(leaf, slot),
4462 item_size - split_offset);
4463 btrfs_mark_buffer_dirty(leaf);
4465 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4466 kfree(buf);
4467 return 0;
4471 * This function splits a single item into two items,
4472 * giving 'new_key' to the new item and splitting the
4473 * old one at split_offset (from the start of the item).
4475 * The path may be released by this operation. After
4476 * the split, the path is pointing to the old item. The
4477 * new item is going to be in the same node as the old one.
4479 * Note: the item being split must be small enough to live alone on
4480 * a tree block with room for one extra struct btrfs_item
4482 * This allows us to split the item in place, keeping a lock on the
4483 * leaf the entire time.
4485 int btrfs_split_item(struct btrfs_trans_handle *trans,
4486 struct btrfs_root *root,
4487 struct btrfs_path *path,
4488 struct btrfs_key *new_key,
4489 unsigned long split_offset)
4491 int ret;
4492 ret = setup_leaf_for_split(trans, root, path,
4493 sizeof(struct btrfs_item));
4494 if (ret)
4495 return ret;
4497 ret = split_item(trans, root, path, new_key, split_offset);
4498 return ret;
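/*
 * A minimal usage sketch for btrfs_split_item(): it assumes the path
 * already points at the item to split (e.g. after a successful
 * btrfs_search_slot()) and that a transaction is held.  The helper
 * name example_split_at() and its key parameters are hypothetical.
 */
static int __maybe_unused example_split_at(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 objectid, u8 type,
					   u64 new_offset,
					   unsigned long split_offset)
{
	struct btrfs_key new_key;

	new_key.objectid = objectid;
	new_key.type = type;
	new_key.offset = new_offset;

	/*
	 * on success, path->slots[0] keeps the front half of the old
	 * item and path->slots[0] + 1 holds new_key with the tail
	 */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}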
4502 * This function duplicates an item, giving 'new_key' to the new item.
4503 * It guarantees both items live in the same tree leaf and the new item
4504 * is contiguous with the original item.
4506 * This allows us to split a file extent in place, keeping a lock on the
4507 * leaf the entire time.
4509 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4510 struct btrfs_root *root,
4511 struct btrfs_path *path,
4512 struct btrfs_key *new_key)
4514 struct extent_buffer *leaf;
4515 int ret;
4516 u32 item_size;
4518 leaf = path->nodes[0];
4519 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4520 ret = setup_leaf_for_split(trans, root, path,
4521 item_size + sizeof(struct btrfs_item));
4522 if (ret)
4523 return ret;
4525 path->slots[0]++;
4526 setup_items_for_insert(root, path, new_key, &item_size,
4527 item_size, item_size +
4528 sizeof(struct btrfs_item), 1);
4529 leaf = path->nodes[0];
4530 memcpy_extent_buffer(leaf,
4531 btrfs_item_ptr_offset(leaf, path->slots[0]),
4532 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4533 item_size);
4534 return 0;
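/*
 * A minimal sketch of the btrfs_duplicate_item() pattern used when a
 * file extent is split in two, e.g. for hole punching.  After the
 * call the path points at the copy, so path->slots[0] - 1 is the
 * original.  example_dup_extent() and split_pos are hypothetical.
 */
static int __maybe_unused example_dup_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_key *orig_key,
					     u64 split_pos)
{
	struct btrfs_key new_key = *orig_key;
	int ret;

	new_key.offset = split_pos;	/* file offset of the second half */
	ret = btrfs_duplicate_item(trans, root, path, &new_key);
	if (ret)
		return ret;	/* -EAGAIN: the leaf changed under us */

	/* the caller would now fix up num_bytes/offset in both copies */
	return 0;
}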
4538 * make the item pointed to by the path smaller. new_size indicates
4539 * how small to make it, and from_end tells us if we just chop bytes
4540 * off the end of the item or if we shift the item to chop bytes off
4541 * the front.
4543 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4544 u32 new_size, int from_end)
4546 int slot;
4547 struct extent_buffer *leaf;
4548 struct btrfs_item *item;
4549 u32 nritems;
4550 unsigned int data_end;
4551 unsigned int old_data_start;
4552 unsigned int old_size;
4553 unsigned int size_diff;
4554 int i;
4555 struct btrfs_map_token token;
4557 btrfs_init_map_token(&token);
4559 leaf = path->nodes[0];
4560 slot = path->slots[0];
4562 old_size = btrfs_item_size_nr(leaf, slot);
4563 if (old_size == new_size)
4564 return;
4566 nritems = btrfs_header_nritems(leaf);
4567 data_end = leaf_data_end(root, leaf);
4569 old_data_start = btrfs_item_offset_nr(leaf, slot);
4571 size_diff = old_size - new_size;
4573 BUG_ON(slot < 0);
4574 BUG_ON(slot >= nritems);
4577 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4579 /* first correct the data pointers */
4580 for (i = slot; i < nritems; i++) {
4581 u32 ioff;
4582 item = btrfs_item_nr(i);
4584 ioff = btrfs_token_item_offset(leaf, item, &token);
4585 btrfs_set_token_item_offset(leaf, item,
4586 ioff + size_diff, &token);
4589 /* shift the data */
4590 if (from_end) {
4591 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4592 data_end + size_diff, btrfs_leaf_data(leaf) +
4593 data_end, old_data_start + new_size - data_end);
4594 } else {
4595 struct btrfs_disk_key disk_key;
4596 u64 offset;
4598 btrfs_item_key(leaf, &disk_key, slot);
4600 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4601 unsigned long ptr;
4602 struct btrfs_file_extent_item *fi;
4604 fi = btrfs_item_ptr(leaf, slot,
4605 struct btrfs_file_extent_item);
4606 fi = (struct btrfs_file_extent_item *)(
4607 (unsigned long)fi - size_diff);
4609 if (btrfs_file_extent_type(leaf, fi) ==
4610 BTRFS_FILE_EXTENT_INLINE) {
4611 ptr = btrfs_item_ptr_offset(leaf, slot);
4612 memmove_extent_buffer(leaf, ptr,
4613 (unsigned long)fi,
4614 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4618 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4619 data_end + size_diff, btrfs_leaf_data(leaf) +
4620 data_end, old_data_start - data_end);
4622 offset = btrfs_disk_key_offset(&disk_key);
4623 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4624 btrfs_set_item_key(leaf, &disk_key, slot);
4625 if (slot == 0)
4626 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4629 item = btrfs_item_nr(slot);
4630 btrfs_set_item_size(leaf, item, new_size);
4631 btrfs_mark_buffer_dirty(leaf);
4633 if (btrfs_leaf_free_space(root, leaf) < 0) {
4634 btrfs_print_leaf(root, leaf);
4635 BUG();
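/*
 * A minimal sketch of btrfs_truncate_item(), chopping bytes off the
 * tail of the item the path points at, as the csum code does when it
 * shrinks a checksum item.  bytes_to_drop must be smaller than the
 * current item size; example_chop_tail() is a hypothetical name.
 */
static void __maybe_unused example_chop_tail(struct btrfs_root *root,
					     struct btrfs_path *path,
					     u32 bytes_to_drop)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	/* from_end == 1 keeps the front of the item and drops the tail */
	btrfs_truncate_item(root, path, old_size - bytes_to_drop, 1);
}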
4640 * make the item pointed to by the path bigger, data_size is the added size.
4642 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4643 u32 data_size)
4645 int slot;
4646 struct extent_buffer *leaf;
4647 struct btrfs_item *item;
4648 u32 nritems;
4649 unsigned int data_end;
4650 unsigned int old_data;
4651 unsigned int old_size;
4652 int i;
4653 struct btrfs_map_token token;
4655 btrfs_init_map_token(&token);
4657 leaf = path->nodes[0];
4659 nritems = btrfs_header_nritems(leaf);
4660 data_end = leaf_data_end(root, leaf);
4662 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4663 btrfs_print_leaf(root, leaf);
4664 BUG();
4666 slot = path->slots[0];
4667 old_data = btrfs_item_end_nr(leaf, slot);
4669 BUG_ON(slot < 0);
4670 if (slot >= nritems) {
4671 btrfs_print_leaf(root, leaf);
4672 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4673 slot, nritems);
4674 BUG_ON(1);
4678 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4680 /* first correct the data pointers */
4681 for (i = slot; i < nritems; i++) {
4682 u32 ioff;
4683 item = btrfs_item_nr(i);
4685 ioff = btrfs_token_item_offset(leaf, item, &token);
4686 btrfs_set_token_item_offset(leaf, item,
4687 ioff - data_size, &token);
4690 /* shift the data */
4691 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4692 data_end - data_size, btrfs_leaf_data(leaf) +
4693 data_end, old_data - data_end);
4695 data_end = old_data;
4696 old_size = btrfs_item_size_nr(leaf, slot);
4697 item = btrfs_item_nr(slot);
4698 btrfs_set_item_size(leaf, item, old_size + data_size);
4699 btrfs_mark_buffer_dirty(leaf);
4701 if (btrfs_leaf_free_space(root, leaf) < 0) {
4702 btrfs_print_leaf(root, leaf);
4703 BUG();
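/*
 * A minimal sketch of btrfs_extend_item(): grow the current item and
 * append data to it, in the style of the inode-ref code.  It assumes
 * the preceding search reserved at least data_len bytes of leaf space
 * via ins_len; example_append() is a hypothetical name.
 */
static void __maybe_unused example_append(struct btrfs_root *root,
					  struct btrfs_path *path,
					  const void *data, u32 data_len)
{
	struct extent_buffer *leaf = path->nodes[0];
	unsigned long ptr;

	btrfs_extend_item(root, path, data_len);

	/* write into the newly added tail of the item */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	ptr += btrfs_item_size_nr(leaf, path->slots[0]) - data_len;
	write_extent_buffer(leaf, data, ptr, data_len);
	btrfs_mark_buffer_dirty(leaf);
}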
4708 * this is a helper for btrfs_insert_empty_items; the main goal here is
4709 * to save stack depth by doing the bulk of the work in a function
4710 * that doesn't call btrfs_search_slot
4712 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4713 struct btrfs_key *cpu_key, u32 *data_size,
4714 u32 total_data, u32 total_size, int nr)
4716 struct btrfs_item *item;
4717 int i;
4718 u32 nritems;
4719 unsigned int data_end;
4720 struct btrfs_disk_key disk_key;
4721 struct extent_buffer *leaf;
4722 int slot;
4723 struct btrfs_map_token token;
4725 if (path->slots[0] == 0) {
4726 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4727 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4729 btrfs_unlock_up_safe(path, 1);
4731 btrfs_init_map_token(&token);
4733 leaf = path->nodes[0];
4734 slot = path->slots[0];
4736 nritems = btrfs_header_nritems(leaf);
4737 data_end = leaf_data_end(root, leaf);
4739 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4740 btrfs_print_leaf(root, leaf);
4741 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4742 total_size, btrfs_leaf_free_space(root, leaf));
4743 BUG();
4746 if (slot != nritems) {
4747 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4749 if (old_data < data_end) {
4750 btrfs_print_leaf(root, leaf);
4751 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4752 slot, old_data, data_end);
4753 BUG_ON(1);
4756 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4758 /* first correct the data pointers */
4759 for (i = slot; i < nritems; i++) {
4760 u32 ioff;
4762 item = btrfs_item_nr(i);
4763 ioff = btrfs_token_item_offset(leaf, item, &token);
4764 btrfs_set_token_item_offset(leaf, item,
4765 ioff - total_data, &token);
4767 /* shift the items */
4768 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4769 btrfs_item_nr_offset(slot),
4770 (nritems - slot) * sizeof(struct btrfs_item));
4772 /* shift the data */
4773 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4774 data_end - total_data, btrfs_leaf_data(leaf) +
4775 data_end, old_data - data_end);
4776 data_end = old_data;
4779 /* setup the item for the new data */
4780 for (i = 0; i < nr; i++) {
4781 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4782 btrfs_set_item_key(leaf, &disk_key, slot + i);
4783 item = btrfs_item_nr(slot + i);
4784 btrfs_set_token_item_offset(leaf, item,
4785 data_end - data_size[i], &token);
4786 data_end -= data_size[i];
4787 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4790 btrfs_set_header_nritems(leaf, nritems + nr);
4791 btrfs_mark_buffer_dirty(leaf);
4793 if (btrfs_leaf_free_space(root, leaf) < 0) {
4794 btrfs_print_leaf(root, leaf);
4795 BUG();
4800 * Given a key and some data, insert items into the tree.
4801 * This does all the path init required, making room in the tree if needed.
4803 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4804 struct btrfs_root *root,
4805 struct btrfs_path *path,
4806 struct btrfs_key *cpu_key, u32 *data_size,
4807 int nr)
4809 int ret = 0;
4810 int slot;
4811 int i;
4812 u32 total_size = 0;
4813 u32 total_data = 0;
4815 for (i = 0; i < nr; i++)
4816 total_data += data_size[i];
4818 total_size = total_data + (nr * sizeof(struct btrfs_item));
4819 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4820 if (ret == 0)
4821 return -EEXIST;
4822 if (ret < 0)
4823 return ret;
4825 slot = path->slots[0];
4826 BUG_ON(slot < 0);
4828 setup_items_for_insert(root, path, cpu_key, data_size,
4829 total_data, total_size, nr);
4830 return 0;
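/*
 * A minimal sketch of batch insertion with btrfs_insert_empty_items():
 * reserve room for two adjacent items in one search, then fill both.
 * The keys must be sorted and must land next to each other in key
 * order; example_insert_pair() and its parameters are hypothetical.
 */
static int __maybe_unused example_insert_pair(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_path *path,
					      struct btrfs_key *keys,
					      void **bufs, u32 *sizes)
{
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret, i;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		return ret;	/* -EEXIST if keys[0] is already present */

	leaf = path->nodes[0];
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
		write_extent_buffer(leaf, bufs[i], ptr, sizes[i]);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}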
4834 * Given a key and some data, insert an item into the tree.
4835 * This does all the path init required, making room in the tree if needed.
4837 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4838 *root, struct btrfs_key *cpu_key, void *data, u32
4839 data_size)
4841 int ret = 0;
4842 struct btrfs_path *path;
4843 struct extent_buffer *leaf;
4844 unsigned long ptr;
4846 path = btrfs_alloc_path();
4847 if (!path)
4848 return -ENOMEM;
4849 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4850 if (!ret) {
4851 leaf = path->nodes[0];
4852 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4853 write_extent_buffer(leaf, data, ptr, data_size);
4854 btrfs_mark_buffer_dirty(leaf);
4856 btrfs_free_path(path);
4857 return ret;
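/*
 * A minimal sketch of a btrfs_insert_item() call, which hides all of
 * the path handling done above.  The key type is chosen purely for
 * illustration; example_insert_blob() is a hypothetical name.
 */
static int __maybe_unused example_insert_blob(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      u64 objectid, void *blob,
					      u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;	/* illustration only */
	key.offset = 0;

	return btrfs_insert_item(trans, root, &key, blob, blob_len);
}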
4861 * delete the pointer from a given node.
4863 * the tree should have been previously balanced so the deletion does not
4864 * empty a node.
4866 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4867 int level, int slot)
4869 struct extent_buffer *parent = path->nodes[level];
4870 u32 nritems;
4871 int ret;
4873 nritems = btrfs_header_nritems(parent);
4874 if (slot != nritems - 1) {
4875 if (level)
4876 tree_mod_log_eb_move(root->fs_info, parent, slot,
4877 slot + 1, nritems - slot - 1);
4878 memmove_extent_buffer(parent,
4879 btrfs_node_key_ptr_offset(slot),
4880 btrfs_node_key_ptr_offset(slot + 1),
4881 sizeof(struct btrfs_key_ptr) *
4882 (nritems - slot - 1));
4883 } else if (level) {
4884 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4885 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4886 BUG_ON(ret < 0);
4889 nritems--;
4890 btrfs_set_header_nritems(parent, nritems);
4891 if (nritems == 0 && parent == root->node) {
4892 BUG_ON(btrfs_header_level(root->node) != 1);
4893 /* just turn the root into a leaf and break */
4894 btrfs_set_header_level(root->node, 0);
4895 } else if (slot == 0) {
4896 struct btrfs_disk_key disk_key;
4898 btrfs_node_key(parent, &disk_key, 0);
4899 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4901 btrfs_mark_buffer_dirty(parent);
4905 * a helper function to delete the leaf pointed to by path->slots[1] and
4906 * path->nodes[1].
4908 * This deletes the pointer in path->nodes[1] and frees the leaf
4909 * block extent.
4911 * The path must have already been setup for deleting the leaf, including
4912 * all the proper balancing. path->nodes[1] must be locked.
4914 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4915 struct btrfs_root *root,
4916 struct btrfs_path *path,
4917 struct extent_buffer *leaf)
4919 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4920 del_ptr(root, path, 1, path->slots[1]);
4923 * btrfs_free_extent is expensive, so we want to make sure we
4924 * aren't holding any locks when we call it
4926 btrfs_unlock_up_safe(path, 0);
4928 root_sub_used(root, leaf->len);
4930 extent_buffer_get(leaf);
4931 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4932 free_extent_buffer_stale(leaf);
4935 * delete the item at the leaf level in path. If that empties
4936 * the leaf, remove it from the tree
4938 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4939 struct btrfs_path *path, int slot, int nr)
4941 struct extent_buffer *leaf;
4942 struct btrfs_item *item;
4943 int last_off;
4944 int dsize = 0;
4945 int ret = 0;
4946 int wret;
4947 int i;
4948 u32 nritems;
4949 struct btrfs_map_token token;
4951 btrfs_init_map_token(&token);
4953 leaf = path->nodes[0];
4954 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4956 for (i = 0; i < nr; i++)
4957 dsize += btrfs_item_size_nr(leaf, slot + i);
4959 nritems = btrfs_header_nritems(leaf);
4961 if (slot + nr != nritems) {
4962 int data_end = leaf_data_end(root, leaf);
4964 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4965 data_end + dsize,
4966 btrfs_leaf_data(leaf) + data_end,
4967 last_off - data_end);
4969 for (i = slot + nr; i < nritems; i++) {
4970 u32 ioff;
4972 item = btrfs_item_nr(i);
4973 ioff = btrfs_token_item_offset(leaf, item, &token);
4974 btrfs_set_token_item_offset(leaf, item,
4975 ioff + dsize, &token);
4978 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4979 btrfs_item_nr_offset(slot + nr),
4980 sizeof(struct btrfs_item) *
4981 (nritems - slot - nr));
4983 btrfs_set_header_nritems(leaf, nritems - nr);
4984 nritems -= nr;
4986 /* delete the leaf if we've emptied it */
4987 if (nritems == 0) {
4988 if (leaf == root->node) {
4989 btrfs_set_header_level(leaf, 0);
4990 } else {
4991 btrfs_set_path_blocking(path);
4992 clean_tree_block(trans, root->fs_info, leaf);
4993 btrfs_del_leaf(trans, root, path, leaf);
4995 } else {
4996 int used = leaf_space_used(leaf, 0, nritems);
4997 if (slot == 0) {
4998 struct btrfs_disk_key disk_key;
5000 btrfs_item_key(leaf, &disk_key, 0);
5001 fixup_low_keys(root->fs_info, path, &disk_key, 1);
5004 /* delete the leaf if it is mostly empty */
5005 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5006 /* push_leaf_left fixes the path.
5007 * make sure the path still points to our leaf
5008 * for a possible call to del_ptr below
5010 slot = path->slots[1];
5011 extent_buffer_get(leaf);
5013 btrfs_set_path_blocking(path);
5014 wret = push_leaf_left(trans, root, path, 1, 1,
5015 1, (u32)-1);
5016 if (wret < 0 && wret != -ENOSPC)
5017 ret = wret;
5019 if (path->nodes[0] == leaf &&
5020 btrfs_header_nritems(leaf)) {
5021 wret = push_leaf_right(trans, root, path, 1,
5022 1, 1, 0);
5023 if (wret < 0 && wret != -ENOSPC)
5024 ret = wret;
5027 if (btrfs_header_nritems(leaf) == 0) {
5028 path->slots[1] = slot;
5029 btrfs_del_leaf(trans, root, path, leaf);
5030 free_extent_buffer(leaf);
5031 ret = 0;
5032 } else {
5033 /* if we're still in the path, make sure
5034 * we're dirty. Otherwise, one of the
5035 * push_leaf functions must have already
5036 * dirtied this buffer
5038 if (path->nodes[0] == leaf)
5039 btrfs_mark_buffer_dirty(leaf);
5040 free_extent_buffer(leaf);
5042 } else {
5043 btrfs_mark_buffer_dirty(leaf);
5046 return ret;
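/*
 * A minimal sketch of deleting the single item the path currently
 * points at; the btrfs_del_item() wrapper in ctree.h does exactly
 * this with nr == 1.  example_del_current() is a hypothetical name.
 */
static int __maybe_unused example_del_current(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}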
5050 * search the tree again to find a leaf with lesser keys
5051 * returns 0 if it found something or 1 if there are no lesser leaves.
5052 * returns < 0 on io errors.
5054 * This may release the path, and so you may lose any locks held at the
5055 * time you call it.
5057 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5059 struct btrfs_key key;
5060 struct btrfs_disk_key found_key;
5061 int ret;
5063 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5065 if (key.offset > 0) {
5066 key.offset--;
5067 } else if (key.type > 0) {
5068 key.type--;
5069 key.offset = (u64)-1;
5070 } else if (key.objectid > 0) {
5071 key.objectid--;
5072 key.type = (u8)-1;
5073 key.offset = (u64)-1;
5074 } else {
5075 return 1;
5078 btrfs_release_path(path);
5079 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5080 if (ret < 0)
5081 return ret;
5082 btrfs_item_key(path->nodes[0], &found_key, 0);
5083 ret = comp_keys(&found_key, &key);
5085 * We might have had an item with the previous key in the tree right
5086 * before we released our path. And after we released our path, that
5087 * item might have been pushed to the first slot (0) of the leaf we
5088 * were holding due to a tree balance. Alternatively, an item with the
5089 * previous key can exist as the only element of a leaf (big fat item).
5090 * Therefore account for these 2 cases, so that our callers (like
5091 * btrfs_previous_item) don't miss an existing item with a key matching
5092 * the previous key we computed above.
5094 if (ret <= 0)
5095 return 0;
5096 return 1;
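/*
 * A minimal sketch of stepping one item back in key order, crossing a
 * leaf boundary with btrfs_prev_leaf() when needed; this is the core
 * of btrfs_previous_item() below.  example_step_back() is a
 * hypothetical name.
 */
static int __maybe_unused example_step_back(struct btrfs_root *root,
					    struct btrfs_path *path)
{
	if (path->slots[0] == 0)
		return btrfs_prev_leaf(root, path);	/* may drop locks */
	path->slots[0]--;
	return 0;
}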
5100 * A helper function to walk down the tree starting at min_key, and looking
5101 * for nodes or leaves whose generation is at least min_trans.
5102 * This is used by the btree defrag code, and tree logging
5104 * This does not cow, but it does stuff the starting key it finds back
5105 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5106 * key and get a writable path.
5108 * This does lock as it descends, and path->keep_locks should be set
5109 * to 1 by the caller.
5111 * This honors path->lowest_level to prevent descent past a given level
5112 * of the tree.
5114 * min_trans indicates the oldest transaction that you are interested
5115 * in walking through. Any nodes or leaves older than min_trans are
5116 * skipped over (without reading them).
5118 * returns zero if something useful was found, < 0 on error and 1 if there
5119 * was nothing in the tree that matched the search criteria.
5121 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5122 struct btrfs_path *path,
5123 u64 min_trans)
5125 struct extent_buffer *cur;
5126 struct btrfs_key found_key;
5127 int slot;
5128 int sret;
5129 u32 nritems;
5130 int level;
5131 int ret = 1;
5132 int keep_locks = path->keep_locks;
5134 path->keep_locks = 1;
5135 again:
5136 cur = btrfs_read_lock_root_node(root);
5137 level = btrfs_header_level(cur);
5138 WARN_ON(path->nodes[level]);
5139 path->nodes[level] = cur;
5140 path->locks[level] = BTRFS_READ_LOCK;
5142 if (btrfs_header_generation(cur) < min_trans) {
5143 ret = 1;
5144 goto out;
5146 while (1) {
5147 nritems = btrfs_header_nritems(cur);
5148 level = btrfs_header_level(cur);
5149 sret = bin_search(cur, min_key, level, &slot);
5151 /* at the lowest level, we're done, setup the path and exit */
5152 if (level == path->lowest_level) {
5153 if (slot >= nritems)
5154 goto find_next_key;
5155 ret = 0;
5156 path->slots[level] = slot;
5157 btrfs_item_key_to_cpu(cur, &found_key, slot);
5158 goto out;
5160 if (sret && slot > 0)
5161 slot--;
5163 * check this node pointer against the min_trans parameter.
5164 * If it is too old, skip to the next one.
5166 while (slot < nritems) {
5167 u64 gen;
5169 gen = btrfs_node_ptr_generation(cur, slot);
5170 if (gen < min_trans) {
5171 slot++;
5172 continue;
5174 break;
5176 find_next_key:
5178 * we didn't find a candidate key in this node, walk forward
5179 * and find another one
5181 if (slot >= nritems) {
5182 path->slots[level] = slot;
5183 btrfs_set_path_blocking(path);
5184 sret = btrfs_find_next_key(root, path, min_key, level,
5185 min_trans);
5186 if (sret == 0) {
5187 btrfs_release_path(path);
5188 goto again;
5189 } else {
5190 goto out;
5193 /* save our key for returning back */
5194 btrfs_node_key_to_cpu(cur, &found_key, slot);
5195 path->slots[level] = slot;
5196 if (level == path->lowest_level) {
5197 ret = 0;
5198 goto out;
5200 btrfs_set_path_blocking(path);
5201 cur = read_node_slot(root, cur, slot);
5202 BUG_ON(!cur); /* -ENOMEM */
5204 btrfs_tree_read_lock(cur);
5206 path->locks[level - 1] = BTRFS_READ_LOCK;
5207 path->nodes[level - 1] = cur;
5208 unlock_up(path, level, 1, 0, NULL);
5209 btrfs_clear_path_blocking(path, NULL, 0);
5211 out:
5212 path->keep_locks = keep_locks;
5213 if (ret == 0) {
5214 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5215 btrfs_set_path_blocking(path);
5216 memcpy(min_key, &found_key, sizeof(found_key));
5218 return ret;
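/*
 * A minimal sketch of the defrag/log-style walk over everything newer
 * than min_trans, using btrfs_search_forward().  The key stepping
 * assumes each key should be visited once; example_walk_newer() is a
 * hypothetical name and the per-item processing is elided.
 */
static int __maybe_unused example_walk_newer(struct btrfs_root *root,
					     u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret)	/* 1: nothing newer left, < 0: error */
			break;
		/* ... process the item at path->slots[0] here ... */
		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;	/* step past the key we just saw */
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}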
5221 static void tree_move_down(struct btrfs_root *root,
5222 struct btrfs_path *path,
5223 int *level, int root_level)
5225 BUG_ON(*level == 0);
5226 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5227 path->slots[*level]);
5228 path->slots[*level - 1] = 0;
5229 (*level)--;
5232 static int tree_move_next_or_upnext(struct btrfs_root *root,
5233 struct btrfs_path *path,
5234 int *level, int root_level)
5236 int ret = 0;
5237 int nritems;
5238 nritems = btrfs_header_nritems(path->nodes[*level]);
5240 path->slots[*level]++;
5242 while (path->slots[*level] >= nritems) {
5243 if (*level == root_level)
5244 return -1;
5246 /* move upnext */
5247 path->slots[*level] = 0;
5248 free_extent_buffer(path->nodes[*level]);
5249 path->nodes[*level] = NULL;
5250 (*level)++;
5251 path->slots[*level]++;
5253 nritems = btrfs_header_nritems(path->nodes[*level]);
5254 ret = 1;
5256 return ret;
5260 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5261 * or down.
5263 static int tree_advance(struct btrfs_root *root,
5264 struct btrfs_path *path,
5265 int *level, int root_level,
5266 int allow_down,
5267 struct btrfs_key *key)
5269 int ret;
5271 if (*level == 0 || !allow_down) {
5272 ret = tree_move_next_or_upnext(root, path, level, root_level);
5273 } else {
5274 tree_move_down(root, path, level, root_level);
5275 ret = 0;
5277 if (ret >= 0) {
5278 if (*level == 0)
5279 btrfs_item_key_to_cpu(path->nodes[*level], key,
5280 path->slots[*level]);
5281 else
5282 btrfs_node_key_to_cpu(path->nodes[*level], key,
5283 path->slots[*level]);
5285 return ret;
5288 static int tree_compare_item(struct btrfs_root *left_root,
5289 struct btrfs_path *left_path,
5290 struct btrfs_path *right_path,
5291 char *tmp_buf)
5293 int cmp;
5294 int len1, len2;
5295 unsigned long off1, off2;
5297 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5298 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5299 if (len1 != len2)
5300 return 1;
5302 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5303 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5304 right_path->slots[0]);
5306 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5308 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5309 if (cmp)
5310 return 1;
5311 return 0;
5314 #define ADVANCE 1
5315 #define ADVANCE_ONLY_NEXT -1
5318 * This function compares two trees and calls the provided callback for
5319 * every changed/new/deleted item it finds.
5320 * If shared tree blocks are encountered, whole subtrees are skipped, making
5321 * the compare pretty fast on snapshotted subvolumes.
5323 * This currently works on commit roots only. As commit roots are read only,
5324 * we don't do any locking. The commit roots are protected with transactions.
5325 * Transactions are ended and rejoined when a commit is tried in between.
5327 * This function checks for modifications done to the trees while comparing.
5328 * If it detects a change, it aborts immediately.
5330 int btrfs_compare_trees(struct btrfs_root *left_root,
5331 struct btrfs_root *right_root,
5332 btrfs_changed_cb_t changed_cb, void *ctx)
5334 int ret;
5335 int cmp;
5336 struct btrfs_path *left_path = NULL;
5337 struct btrfs_path *right_path = NULL;
5338 struct btrfs_key left_key;
5339 struct btrfs_key right_key;
5340 char *tmp_buf = NULL;
5341 int left_root_level;
5342 int right_root_level;
5343 int left_level;
5344 int right_level;
5345 int left_end_reached;
5346 int right_end_reached;
5347 int advance_left;
5348 int advance_right;
5349 u64 left_blockptr;
5350 u64 right_blockptr;
5351 u64 left_gen;
5352 u64 right_gen;
5354 left_path = btrfs_alloc_path();
5355 if (!left_path) {
5356 ret = -ENOMEM;
5357 goto out;
5359 right_path = btrfs_alloc_path();
5360 if (!right_path) {
5361 ret = -ENOMEM;
5362 goto out;
5365 tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
5366 if (!tmp_buf) {
5367 ret = -ENOMEM;
5368 goto out;
5371 left_path->search_commit_root = 1;
5372 left_path->skip_locking = 1;
5373 right_path->search_commit_root = 1;
5374 right_path->skip_locking = 1;
5377 * Strategy: Go to the first items of both trees. Then do
5379 * If both trees are at level 0
5380 * Compare keys of current items
5381 * If left < right treat left item as new, advance left tree
5382 * and repeat
5383 * If left > right treat right item as deleted, advance right tree
5384 * and repeat
5385 * If left == right do deep compare of items, treat as changed if
5386 * needed, advance both trees and repeat
5387 * If both trees are at the same level but not at level 0
5388 * Compare keys of current nodes/leaves
5389 * If left < right advance left tree and repeat
5390 * If left > right advance right tree and repeat
5391 * If left == right compare blockptrs of the next nodes/leaves
5392 * If they match advance both trees but stay at the same level
5393 * and repeat
5394 * If they don't match advance both trees while allowing to go
5395 * deeper and repeat
5396 * If tree levels are different
5397 * Advance the tree that needs it and repeat
5399 * Advancing a tree means:
5400 * If we are at level 0, try to go to the next slot. If that's not
5401 * possible, go one level up and repeat. Stop when we found a level
5402 * where we could go to the next slot. We may at this point be on a
5403 * node or a leaf.
5405 * If we are not at level 0 and not on shared tree blocks, go one
5406 * level deeper.
5408 * If we are not at level 0 and on shared tree blocks, go one slot to
5409 * the right if possible or go up and right.
5412 down_read(&left_root->fs_info->commit_root_sem);
5413 left_level = btrfs_header_level(left_root->commit_root);
5414 left_root_level = left_level;
5415 left_path->nodes[left_level] = left_root->commit_root;
5416 extent_buffer_get(left_path->nodes[left_level]);
5418 right_level = btrfs_header_level(right_root->commit_root);
5419 right_root_level = right_level;
5420 right_path->nodes[right_level] = right_root->commit_root;
5421 extent_buffer_get(right_path->nodes[right_level]);
5422 up_read(&left_root->fs_info->commit_root_sem);
5424 if (left_level == 0)
5425 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5426 &left_key, left_path->slots[left_level]);
5427 else
5428 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5429 &left_key, left_path->slots[left_level]);
5430 if (right_level == 0)
5431 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5432 &right_key, right_path->slots[right_level]);
5433 else
5434 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5435 &right_key, right_path->slots[right_level]);
5437 left_end_reached = right_end_reached = 0;
5438 advance_left = advance_right = 0;
5440 while (1) {
5441 if (advance_left && !left_end_reached) {
5442 ret = tree_advance(left_root, left_path, &left_level,
5443 left_root_level,
5444 advance_left != ADVANCE_ONLY_NEXT,
5445 &left_key);
5446 if (ret < 0)
5447 left_end_reached = ADVANCE;
5448 advance_left = 0;
5450 if (advance_right && !right_end_reached) {
5451 ret = tree_advance(right_root, right_path, &right_level,
5452 right_root_level,
5453 advance_right != ADVANCE_ONLY_NEXT,
5454 &right_key);
5455 if (ret < 0)
5456 right_end_reached = ADVANCE;
5457 advance_right = 0;
5460 if (left_end_reached && right_end_reached) {
5461 ret = 0;
5462 goto out;
5463 } else if (left_end_reached) {
5464 if (right_level == 0) {
5465 ret = changed_cb(left_root, right_root,
5466 left_path, right_path,
5467 &right_key,
5468 BTRFS_COMPARE_TREE_DELETED,
5469 ctx);
5470 if (ret < 0)
5471 goto out;
5473 advance_right = ADVANCE;
5474 continue;
5475 } else if (right_end_reached) {
5476 if (left_level == 0) {
5477 ret = changed_cb(left_root, right_root,
5478 left_path, right_path,
5479 &left_key,
5480 BTRFS_COMPARE_TREE_NEW,
5481 ctx);
5482 if (ret < 0)
5483 goto out;
5485 advance_left = ADVANCE;
5486 continue;
5489 if (left_level == 0 && right_level == 0) {
5490 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5491 if (cmp < 0) {
5492 ret = changed_cb(left_root, right_root,
5493 left_path, right_path,
5494 &left_key,
5495 BTRFS_COMPARE_TREE_NEW,
5496 ctx);
5497 if (ret < 0)
5498 goto out;
5499 advance_left = ADVANCE;
5500 } else if (cmp > 0) {
5501 ret = changed_cb(left_root, right_root,
5502 left_path, right_path,
5503 &right_key,
5504 BTRFS_COMPARE_TREE_DELETED,
5505 ctx);
5506 if (ret < 0)
5507 goto out;
5508 advance_right = ADVANCE;
5509 } else {
5510 enum btrfs_compare_tree_result result;
5512 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5513 ret = tree_compare_item(left_root, left_path,
5514 right_path, tmp_buf);
5515 if (ret)
5516 result = BTRFS_COMPARE_TREE_CHANGED;
5517 else
5518 result = BTRFS_COMPARE_TREE_SAME;
5519 ret = changed_cb(left_root, right_root,
5520 left_path, right_path,
5521 &left_key, result, ctx);
5522 if (ret < 0)
5523 goto out;
5524 advance_left = ADVANCE;
5525 advance_right = ADVANCE;
5527 } else if (left_level == right_level) {
5528 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5529 if (cmp < 0) {
5530 advance_left = ADVANCE;
5531 } else if (cmp > 0) {
5532 advance_right = ADVANCE;
5533 } else {
5534 left_blockptr = btrfs_node_blockptr(
5535 left_path->nodes[left_level],
5536 left_path->slots[left_level]);
5537 right_blockptr = btrfs_node_blockptr(
5538 right_path->nodes[right_level],
5539 right_path->slots[right_level]);
5540 left_gen = btrfs_node_ptr_generation(
5541 left_path->nodes[left_level],
5542 left_path->slots[left_level]);
5543 right_gen = btrfs_node_ptr_generation(
5544 right_path->nodes[right_level],
5545 right_path->slots[right_level]);
5546 if (left_blockptr == right_blockptr &&
5547 left_gen == right_gen) {
5549 * As we're on a shared block, don't
5550 * allow descending any deeper.
5552 advance_left = ADVANCE_ONLY_NEXT;
5553 advance_right = ADVANCE_ONLY_NEXT;
5554 } else {
5555 advance_left = ADVANCE;
5556 advance_right = ADVANCE;
5559 } else if (left_level < right_level) {
5560 advance_right = ADVANCE;
5561 } else {
5562 advance_left = ADVANCE;
5566 out:
5567 btrfs_free_path(left_path);
5568 btrfs_free_path(right_path);
5569 kfree(tmp_buf);
5570 return ret;
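/*
 * A minimal sketch of a btrfs_changed_cb_t callback, the hook that
 * btrfs_compare_trees() drives (the send code uses this interface).
 * This one only counts differences via the ctx pointer; the name
 * example_changed_cb() is hypothetical.
 */
static int __maybe_unused example_changed_cb(struct btrfs_root *left_root,
					     struct btrfs_root *right_root,
					     struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	u64 *nr_changes = ctx;

	if (result != BTRFS_COMPARE_TREE_SAME)
		(*nr_changes)++;
	return 0;	/* a negative return aborts the compare */
}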
5574 * this is similar to btrfs_next_leaf, but does not try to preserve
5575 * and fix up the path. It looks for and returns the next key in the
5576 * tree based on the current path and the min_trans parameters.
5578 * 0 is returned if another key is found, < 0 if there are any errors
5579 * and 1 is returned if there are no higher keys in the tree
5581 * path->keep_locks should be set to 1 on the search made before
5582 * calling this function.
5584 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5585 struct btrfs_key *key, int level, u64 min_trans)
5587 int slot;
5588 struct extent_buffer *c;
5590 WARN_ON(!path->keep_locks);
5591 while (level < BTRFS_MAX_LEVEL) {
5592 if (!path->nodes[level])
5593 return 1;
5595 slot = path->slots[level] + 1;
5596 c = path->nodes[level];
5597 next:
5598 if (slot >= btrfs_header_nritems(c)) {
5599 int ret;
5600 int orig_lowest;
5601 struct btrfs_key cur_key;
5602 if (level + 1 >= BTRFS_MAX_LEVEL ||
5603 !path->nodes[level + 1])
5604 return 1;
5606 if (path->locks[level + 1]) {
5607 level++;
5608 continue;
5611 slot = btrfs_header_nritems(c) - 1;
5612 if (level == 0)
5613 btrfs_item_key_to_cpu(c, &cur_key, slot);
5614 else
5615 btrfs_node_key_to_cpu(c, &cur_key, slot);
5617 orig_lowest = path->lowest_level;
5618 btrfs_release_path(path);
5619 path->lowest_level = level;
5620 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5621 0, 0);
5622 path->lowest_level = orig_lowest;
5623 if (ret < 0)
5624 return ret;
5626 c = path->nodes[level];
5627 slot = path->slots[level];
5628 if (ret == 0)
5629 slot++;
5630 goto next;
5633 if (level == 0)
5634 btrfs_item_key_to_cpu(c, key, slot);
5635 else {
5636 u64 gen = btrfs_node_ptr_generation(c, slot);
5638 if (gen < min_trans) {
5639 slot++;
5640 goto next;
5642 btrfs_node_key_to_cpu(c, key, slot);
5644 return 0;
5646 return 1;
5650 * search the tree again to find a leaf with greater keys
5651 * returns 0 if it found something or 1 if there are no greater leaves.
5652 * returns < 0 on io errors.
5654 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5656 return btrfs_next_old_leaf(root, path, 0);
5659 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5660 u64 time_seq)
5662 int slot;
5663 int level;
5664 struct extent_buffer *c;
5665 struct extent_buffer *next;
5666 struct btrfs_key key;
5667 u32 nritems;
5668 int ret;
5669 int old_spinning = path->leave_spinning;
5670 int next_rw_lock = 0;
5672 nritems = btrfs_header_nritems(path->nodes[0]);
5673 if (nritems == 0)
5674 return 1;
5676 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5677 again:
5678 level = 1;
5679 next = NULL;
5680 next_rw_lock = 0;
5681 btrfs_release_path(path);
5683 path->keep_locks = 1;
5684 path->leave_spinning = 1;
5686 if (time_seq)
5687 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5688 else
5689 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5690 path->keep_locks = 0;
5692 if (ret < 0)
5693 return ret;
5695 nritems = btrfs_header_nritems(path->nodes[0]);
5697 * by releasing the path above we dropped all our locks. A balance
5698 * could have added more items next to the key that used to be
5699 * at the very end of the block. So, check again here and
5700 * advance the path if there are now more items available.
5702 if (nritems > 0 && path->slots[0] < nritems - 1) {
5703 if (ret == 0)
5704 path->slots[0]++;
5705 ret = 0;
5706 goto done;
5709 * So the above check misses one case:
5710 * - after releasing the path above, someone has removed the item that
5711 * used to be at the very end of the block, and balancing between leaves
5712 * moved in another one with a bigger key.offset to replace it.
5714 * This one should be returned as well, or we can get leaf corruption
5715 * later (especially in __btrfs_drop_extents()).
5717 * A bit more explanation of this check:
5718 * with ret > 0 the key wasn't found, and the path points to the slot
5719 * where it would be inserted, so the item at path->slots[0] must be the
5720 * bigger one.
5722 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5723 ret = 0;
5724 goto done;
5727 while (level < BTRFS_MAX_LEVEL) {
5728 if (!path->nodes[level]) {
5729 ret = 1;
5730 goto done;
5733 slot = path->slots[level] + 1;
5734 c = path->nodes[level];
5735 if (slot >= btrfs_header_nritems(c)) {
5736 level++;
5737 if (level == BTRFS_MAX_LEVEL) {
5738 ret = 1;
5739 goto done;
5741 continue;
5744 if (next) {
5745 btrfs_tree_unlock_rw(next, next_rw_lock);
5746 free_extent_buffer(next);
5749 next = c;
5750 next_rw_lock = path->locks[level];
5751 ret = read_block_for_search(NULL, root, path, &next, level,
5752 slot, &key, 0);
5753 if (ret == -EAGAIN)
5754 goto again;
5756 if (ret < 0) {
5757 btrfs_release_path(path);
5758 goto done;
5761 if (!path->skip_locking) {
5762 ret = btrfs_try_tree_read_lock(next);
5763 if (!ret && time_seq) {
5765 * If we don't get the lock, we may be racing
5766 * with push_leaf_left, holding that lock while
5767 * itself waiting for the leaf we've currently
5768 * locked. To solve this situation, we give up
5769 * on our lock and cycle.
5771 free_extent_buffer(next);
5772 btrfs_release_path(path);
5773 cond_resched();
5774 goto again;
5776 if (!ret) {
5777 btrfs_set_path_blocking(path);
5778 btrfs_tree_read_lock(next);
5779 btrfs_clear_path_blocking(path, next,
5780 BTRFS_READ_LOCK);
5782 next_rw_lock = BTRFS_READ_LOCK;
5784 break;
5786 path->slots[level] = slot;
5787 while (1) {
5788 level--;
5789 c = path->nodes[level];
5790 if (path->locks[level])
5791 btrfs_tree_unlock_rw(c, path->locks[level]);
5793 free_extent_buffer(c);
5794 path->nodes[level] = next;
5795 path->slots[level] = 0;
5796 if (!path->skip_locking)
5797 path->locks[level] = next_rw_lock;
5798 if (!level)
5799 break;
5801 ret = read_block_for_search(NULL, root, path, &next, level,
5802 0, &key, 0);
5803 if (ret == -EAGAIN)
5804 goto again;
5806 if (ret < 0) {
5807 btrfs_release_path(path);
5808 goto done;
5811 if (!path->skip_locking) {
5812 ret = btrfs_try_tree_read_lock(next);
5813 if (!ret) {
5814 btrfs_set_path_blocking(path);
5815 btrfs_tree_read_lock(next);
5816 btrfs_clear_path_blocking(path, next,
5817 BTRFS_READ_LOCK);
5819 next_rw_lock = BTRFS_READ_LOCK;
5822 ret = 0;
5823 done:
5824 unlock_up(path, 0, 1, 0, NULL);
5825 path->leave_spinning = old_spinning;
5826 if (!old_spinning)
5827 btrfs_set_path_blocking(path);
5829 return ret;
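/*
 * A minimal sketch of the canonical pattern for visiting every item
 * in a tree with btrfs_next_leaf(); most read-only scans follow this
 * shape.  example_iterate_all() is a hypothetical name and the
 * per-item processing is elided.
 */
static int __maybe_unused example_iterate_all(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };	/* start at the smallest key */
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item at (leaf, path->slots[0]) ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}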
5833 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5834 * searching until it gets past min_objectid or finds an item of 'type'
5836 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5838 int btrfs_previous_item(struct btrfs_root *root,
5839 struct btrfs_path *path, u64 min_objectid,
5840 int type)
5842 struct btrfs_key found_key;
5843 struct extent_buffer *leaf;
5844 u32 nritems;
5845 int ret;
5847 while (1) {
5848 if (path->slots[0] == 0) {
5849 btrfs_set_path_blocking(path);
5850 ret = btrfs_prev_leaf(root, path);
5851 if (ret != 0)
5852 return ret;
5853 } else {
5854 path->slots[0]--;
5856 leaf = path->nodes[0];
5857 nritems = btrfs_header_nritems(leaf);
5858 if (nritems == 0)
5859 return 1;
5860 if (path->slots[0] == nritems)
5861 path->slots[0]--;
5863 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5864 if (found_key.objectid < min_objectid)
5865 break;
5866 if (found_key.type == type)
5867 return 0;
5868 if (found_key.objectid == min_objectid &&
5869 found_key.type < type)
5870 break;
5872 return 1;
5876 * search the extent tree to find a previous Metadata/Data extent item
5877 * with an objectid of at least min_objectid.
5879 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5881 int btrfs_previous_extent_item(struct btrfs_root *root,
5882 struct btrfs_path *path, u64 min_objectid)
5884 struct btrfs_key found_key;
5885 struct extent_buffer *leaf;
5886 u32 nritems;
5887 int ret;
5889 while (1) {
5890 if (path->slots[0] == 0) {
5891 btrfs_set_path_blocking(path);
5892 ret = btrfs_prev_leaf(root, path);
5893 if (ret != 0)
5894 return ret;
5895 } else {
5896 path->slots[0]--;
5898 leaf = path->nodes[0];
5899 nritems = btrfs_header_nritems(leaf);
5900 if (nritems == 0)
5901 return 1;
5902 if (path->slots[0] == nritems)
5903 path->slots[0]--;
5905 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5906 if (found_key.objectid < min_objectid)
5907 break;
5908 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5909 found_key.type == BTRFS_METADATA_ITEM_KEY)
5910 return 0;
5911 if (found_key.objectid == min_objectid &&
5912 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5913 break;
5915 return 1;