/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
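/*
 * Usage sketch (illustrative, not part of this file): paths are allocated
 * and released in pairs around a search:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	(use path->nodes[0] and path->slots[0])
 *	btrfs_free_path(path);
 */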
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths where no locks or extent buffers are
 * held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	struct seq_list elem;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
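/*
 * Note (summary, not from the original file): the entries above make up the
 * tree modification log.  Each logged operation remembers enough of a
 * node's previous state that a reader holding a sequence number can rewind
 * the node to how it looked at that point in time; see
 * __tree_mod_log_rewind() below.
 */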
static inline void
__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
{
	elem->flags = 1;
	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}
void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	spin_lock(&fs_info->tree_mod_seq_lock);
	__get_tree_mod_seq(fs_info, elem);
	spin_unlock(&fs_info->tree_mod_seq_lock);
}
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	BUG_ON(!(elem->flags & 1));
	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				goto out;
			}
			min_seq = cur_elem->seq;
		}
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->elem.seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		list_del(&tm->elem.list);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
out:
	spin_unlock(&fs_info->tree_mod_seq_lock);
}
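/*
 * Usage sketch (illustrative, not part of this file): a reader that needs a
 * stable view of past tree states pins a sequence number for the duration
 * of its walk:
 *
 *	struct seq_list elem = {};
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	(walk the tree, passing elem.seq as time_seq)
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * Log entries older than the lowest pinned sequence number may be freed by
 * btrfs_put_tree_mod_seq() above.
 */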
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;
	int ret = 0;

	BUG_ON(!tm || !tm->elem.seq);

	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->elem.seq < tm->elem.seq)
			new = &((*new)->rb_left);
		else if (cur->elem.seq > tm->elem.seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			ret = -EEXIST;
			goto unlock;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
unlock:
	write_unlock(&fs_info->tree_mod_log_lock);
	return ret;
}
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (!eb)
		return 0;
	if (btrfs_header_level(eb) == 0)
		return 1;
	return 0;
}
/*
 * This allocates memory and gets a tree modification sequence number when
 * needed.
 *
 * Returns 0 when no sequence number is needed, < 0 on error.
 * Returns 1 when a sequence number was added. In this case,
 * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
 * after inserting into the rb tree.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;
	int seq;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	tm->elem.flags = 0;
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list, because no blocker exists. items
		 * are removed from the list only when the existing blocker is
		 * removed from the list.
		 */
		kfree(tm);
		seq = 0;
		spin_unlock(&fs_info->tree_mod_seq_lock);
	} else {
		__get_tree_mod_seq(fs_info, &tm->elem);
		seq = tm->elem.seq;
	}

	return seq;
}
static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	ret = __tree_mod_log_insert(fs_info, tm);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	ret = __tree_mod_log_insert(fs_info, tm);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	ret = __tree_mod_log_insert(fs_info, tm);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->elem.seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->elem.seq > cur->elem.seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->elem.seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->elem.seq < cur->elem.seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return;

	/* speed this up by single seq for all operations? */
	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
}
static noinline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb,
			  struct btrfs_disk_key *disk_key, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	int i;
	int ret;
	u32 nritems;

	if (tree_mod_dont_log(fs_info, eb))
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key(fs_info, eb, i,
					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	tree_mod_log_free_eb(root->fs_info, root->node);
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = root->node->start;
	int looped = 0;

	if (!time_seq)
		return 0;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return 0;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		BUG_ON(root_logical == root->node->start);
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->elem.seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	extent_buffer_get(eb_rewin);
	free_extent_buffer(eb);

	__tree_mod_log_rewind(eb_rewin, time_seq, tm);

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
	if (!tm)
		return root->node;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = root->node->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root)
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	else
		eb = btrfs_clone_extent_buffer(root->node);
	btrfs_tree_read_unlock(root->node);
	free_extent_buffer(root->node);
	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, root->root_key.objectid);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	extent_buffer_get(eb);

	return eb;
}
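/*
 * Note (summary, not from the original file): get_old_root() is the entry
 * point the time_seq-based search path uses to start walking from a rewound
 * root instead of the current one.
 */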
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
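/*
 * Worked example (illustrative): with a 4096 byte blocksize, blocks at
 * 100000 and 120000 are close (120000 - (100000 + 4096) = 15904 < 32768),
 * while blocks at 100000 and 200000 are not (95904 >= 32768).
 */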
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
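/*
 * Example (illustrative): keys compare by objectid first, then type, then
 * offset, so (256, DIR_ITEM, 0) < (257, EXTENT_DATA, 0) and
 * (257, EXTENT_DATA, 0) < (257, EXTENT_DATA, 4096).
 */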
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
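/*
 * Layout sketch (illustrative): item headers grow forward from the start of
 * a leaf while item data grows backward from the end, and leaf_data_end()
 * marks where the data stack currently stops:
 *
 *	[header][item 0][item 1] ... free space ... [data 1][data 0]
 *	                                            ^
 *	                                            leaf_data_end()
 */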
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
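/*
 * Note (illustrative): generic_bin_search() returns 0 with *slot set to the
 * matching index when the key is found; otherwise it returns 1 and *slot is
 * the insertion point, which may equal 'max' when the key sorts after every
 * key in the block.
 */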
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &right_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(trans, root, path, level + 1, pslot, 1);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key, u64 time_seq)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				/*
				 * we found an up to date block without
				 * sleeping, return right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			/* now we're allowed to do a blocking uptodate check */
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * loop.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
	return ret;
done:
	return ret;
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level + 1 > write_lock_level) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && cow &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key, 0);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_try_tree_read_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, &write_lock_level);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}
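/*
 * Usage sketch (editorial, not part of the original file): a minimal
 * read-only lookup with btrfs_search_slot.  'root' and 'ino' stand in for
 * a valid tree root and inode number; error handling is trimmed.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct btrfs_inode_item *ii;
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *		... read fields through the btrfs_inode_* accessors ...
 *	}
 *	btrfs_free_path(path);
 *
 * A NULL trans handle with ins_len == 0 and cow == 0 keeps the search
 * read-only; a modifying caller passes its transaction handle and a
 * positive ins_len so leaves are split on the way down.
 */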
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);

	if (p->search_commit_root) {
		BUG_ON(time_seq);
		return btrfs_search_slot(NULL, root, key, p, 0, 0);
	}

again:
	b = get_old_root(root, time_seq);
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

	while (b) {
		level = btrfs_header_level(b);
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(NULL, root, p, &b, level,
						    slot, key, time_seq);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			level = btrfs_header_level(b);
			err = btrfs_try_tree_read_lock(b);
			if (!err) {
				btrfs_set_path_blocking(p);
				btrfs_tree_read_lock(b);
				btrfs_clear_path_blocking(p, b,
							  BTRFS_READ_LOCK);
			}
			p->locks[level] = BTRFS_READ_LOCK;
			p->nodes[level] = b;
			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
			if (b != p->nodes[level]) {
				btrfs_tree_unlock_rw(p->nodes[level],
						     p->locks[level]);
				p->locks[level] = 0;
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);
			goto done;
		}
	}
	ret = 1;
done:
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);

	return ret;
}
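/*
 * Usage sketch (editorial): replaying a lookup against an older version of
 * the tree.  The caller is assumed to have pinned a point in the
 * modification log first (the send/backref code does this via
 * btrfs_get_tree_mod_seq); this is a typical-caller assumption, not a
 * quote of one.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	... on ret == 0 the item sits in path->nodes[0] exactly as
 *	    btrfs_search_slot would have left it, read-only ...
 *	btrfs_free_path(path);
 */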
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
static void fixup_low_keys(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
			     push_items);
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
				     src_nritems - push_items);
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
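/*
 * Worked example (editorial): with 4K nodes, BTRFS_NODEPTRS_PER_BLOCK is
 * (4096 - sizeof(struct btrfs_header)) / sizeof(struct btrfs_key_ptr)
 * = 3995 / 33 = 121 pointers.  If dst holds 110 and src holds 30,
 * push_items starts at 121 - 110 = 11; the non-empty path then caps it at
 * min(30 - 8, 11) = 11, leaving src with 19 pointers, safely above the
 * 8-pointer floor enforced above.
 */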
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      dst_nritems *
			      sizeof(struct btrfs_key_ptr));

	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
			     src_nritems - push_items, push_items);
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	tree_mod_log_set_root_pointer(root, c);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		if (level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, let's make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
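/*
 * Worked example (editorial): on a 4K leaf, BTRFS_LEAF_DATA_SIZE is
 * 4096 - sizeof(struct btrfs_header) = 4096 - 101 = 3995 bytes.  A leaf
 * holding 10 items whose data totals 1000 bytes uses
 * 10 * sizeof(struct btrfs_item) + 1000 = 10 * 25 + 1000 = 1250 bytes,
 * so btrfs_leaf_free_space returns 3995 - 1250 = 2745.  An insert must
 * fit both its item struct and its data inside that figure.
 */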
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, min_data_size, empty,
				 right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	u32 this_item_size;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
		      &token);
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		push_space = push_space - btrfs_token_item_size(right,
								item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		clean_tree_block(trans, root, right);

	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(trans, root, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	if (left == NULL)
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		if (ret == -ENOSPC)
			ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, min_data_size,
				empty, left, free_space, right_nritems,
				max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);
	}

	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
		   path->slots[1] + 1, 1);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);
}
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	int ret;
	int progress = 0;
	int slot;
	u32 nritems;

	slot = path->slots[0];

	/*
	 * try to push all the items after our slot into the
	 * right leaf
	 */
	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf.  If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	if (progress)
		return 0;
	return 1;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size) {
		wret = push_leaf_right(trans, root, path, data_size,
				       data_size, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size,
					      data_size, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, root->leafsize);

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0)
				fixup_low_keys(trans, root, path,
					       &disk_key, 1);
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

	if (split == 2) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return 0;

push_for_double:
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
		return 0;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;
	if (ret > 0)
		ret = -EAGAIN;
	if (ret < 0)
		goto err;

	ret = -EAGAIN;
	leaf = path->nodes[0];
	/* if our item isn't there or got smaller, return now */
	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

	/* the leaf has changed, it now has room.  return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
		goto err;

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
	if (ret)
		goto err;

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
	kfree(buf);
	return 0;
}
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, root, path, new_key, split_offset);
	return ret;
}
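/*
 * Usage sketch (editorial): the checksum code uses this pattern to carve
 * one csum item in two around a byte range; the real callers live in
 * file-item.c.  'new_key' and 'split_offset' here are placeholders the
 * caller must pick so that key order in the leaf is preserved.
 *
 *	struct btrfs_key new_key;
 *	... fill new_key with a key sorting after the original item ...
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * On return the path still points at the front half (now split_offset
 * bytes long); the item directly after it holds the remaining bytes under
 * new_key.
 */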
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	setup_items_for_insert(trans, root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
					       disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
4268 * This does all the path init required, making room in the tree if needed.
4269 * Returns the number of keys that were inserted.
4271 int btrfs_insert_some_items(struct btrfs_trans_handle
*trans
,
4272 struct btrfs_root
*root
,
4273 struct btrfs_path
*path
,
4274 struct btrfs_key
*cpu_key
, u32
*data_size
,
4277 struct extent_buffer
*leaf
;
4278 struct btrfs_item
*item
;
4285 unsigned int data_end
;
4286 struct btrfs_disk_key disk_key
;
4287 struct btrfs_key found_key
;
4288 struct btrfs_map_token token
;
4290 btrfs_init_map_token(&token
);
4292 for (i
= 0; i
< nr
; i
++) {
4293 if (total_size
+ data_size
[i
] + sizeof(struct btrfs_item
) >
4294 BTRFS_LEAF_DATA_SIZE(root
)) {
4298 total_data
+= data_size
[i
];
4299 total_size
+= data_size
[i
] + sizeof(struct btrfs_item
);
4303 ret
= btrfs_search_slot(trans
, root
, cpu_key
, path
, total_size
, 1);
4309 leaf
= path
->nodes
[0];
4311 nritems
= btrfs_header_nritems(leaf
);
4312 data_end
= leaf_data_end(root
, leaf
);
4314 if (btrfs_leaf_free_space(root
, leaf
) < total_size
) {
4315 for (i
= nr
; i
>= 0; i
--) {
4316 total_data
-= data_size
[i
];
4317 total_size
-= data_size
[i
] + sizeof(struct btrfs_item
);
4318 if (total_size
< btrfs_leaf_free_space(root
, leaf
))
4324 slot
= path
->slots
[0];
4327 if (slot
!= nritems
) {
4328 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
4330 item
= btrfs_item_nr(leaf
, slot
);
4331 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
4333 /* figure out how many keys we can insert in here */
4334 total_data
= data_size
[0];
4335 for (i
= 1; i
< nr
; i
++) {
4336 if (btrfs_comp_cpu_keys(&found_key
, cpu_key
+ i
) <= 0)
4338 total_data
+= data_size
[i
];
4342 if (old_data
< data_end
) {
4343 btrfs_print_leaf(root
, leaf
);
4344 printk(KERN_CRIT
"slot %d old_data %d data_end %d\n",
4345 slot
, old_data
, data_end
);
4349 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4351 /* first correct the data pointers */
4352 for (i
= slot
; i
< nritems
; i
++) {
4355 item
= btrfs_item_nr(leaf
, i
);
4356 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4357 btrfs_set_token_item_offset(leaf
, item
,
4358 ioff
- total_data
, &token
);
4360 /* shift the items */
4361 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
4362 btrfs_item_nr_offset(slot
),
4363 (nritems
- slot
) * sizeof(struct btrfs_item
));
4365 /* shift the data */
4366 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4367 data_end
- total_data
, btrfs_leaf_data(leaf
) +
4368 data_end
, old_data
- data_end
);
4369 data_end
= old_data
;
4372 * this sucks but it has to be done, if we are inserting at
4373 * the end of the leaf only insert 1 of the items, since we
4374 * have no way of knowing whats on the next leaf and we'd have
4375 * to drop our current locks to figure it out
4380 /* setup the item for the new data */
4381 for (i
= 0; i
< nr
; i
++) {
4382 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
4383 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
4384 item
= btrfs_item_nr(leaf
, slot
+ i
);
4385 btrfs_set_token_item_offset(leaf
, item
,
4386 data_end
- data_size
[i
], &token
);
4387 data_end
-= data_size
[i
];
4388 btrfs_set_token_item_size(leaf
, item
, data_size
[i
], &token
);
4390 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
4391 btrfs_mark_buffer_dirty(leaf
);
4395 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
4396 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
4399 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
4400 btrfs_print_leaf(root
, leaf
);
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(trans, root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}
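/*
 * Usage sketch (editorial): inserting one empty item and then filling it,
 * the pattern btrfs_insert_item wraps below.  The payload type is just an
 * example; any fixed-size on-disk item works the same way.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	u32 size = sizeof(struct btrfs_dev_item);
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_insert_empty_items(trans, root, path, &key, &size, 1);
 *	if (ret == 0) {
 *		unsigned long ptr;
 *		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
 *		write_extent_buffer(path->nodes[0], data, ptr, size);
 *		btrfs_mark_buffer_dirty(path->nodes[0]);
 *	}
 *	btrfs_free_path(path);
 */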
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (tree_mod_log && level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (tree_mod_log && level) {
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(trans, root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(trans, root, path, 1, path->slots[1], 1);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the items at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(trans, root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
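
/*
 * Illustrative sketch, not part of the original file: locating an item by
 * exact key and removing it with btrfs_del_items().  ins_len = -1 together
 * with cow = 1 tells btrfs_search_slot() the caller intends to delete.
 */
static int __maybe_unused delete_one_item_sketch(struct btrfs_trans_handle *trans,
						 struct btrfs_root *root,
						 struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* no item with this exact key */

	btrfs_free_path(path);
	return ret;
}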
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
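
/*
 * Illustrative sketch, not part of the original file: walking leaves from
 * the current path position toward smaller keys.  Because btrfs_prev_leaf()
 * can release the path, no locks from before the call may be relied on.
 */
static int __maybe_unused count_lesser_leaves_sketch(struct btrfs_root *root,
						     struct btrfs_path *path)
{
	int count = 0;
	int ret;

	while (1) {
		ret = btrfs_prev_leaf(root, path);
		if (ret < 0)
			return ret;	/* io error */
		if (ret > 0)
			return count;	/* no lesser leaves remain */
		count++;
	}
}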
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
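
/*
 * Illustrative sketch, not part of the original file: finding the first key
 * at or after *min_key that lives in a block newer than min_trans, in the
 * style of the defrag and tree-log callers.  keep_locks must be set before
 * the search, as the comment above requires.
 */
static int __maybe_unused find_newer_key_sketch(struct btrfs_root *root,
						struct btrfs_key *min_key,
						u64 min_trans)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_forward(root, min_key, NULL, path, 0, min_trans);
	/*
	 * on ret == 0, *min_key holds the found key; it can be handed to
	 * btrfs_search_slot() with cow = 1 to get a writable path
	 */
	btrfs_free_path(path);
	return ret;
}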
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level,
			int cache_only, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur ||
				    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
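
/*
 * Illustrative sketch, not part of the original file: peeking at the key
 * that follows the current path position without moving the path.  The
 * preceding search must have been done with path->keep_locks = 1.
 */
static int __maybe_unused peek_next_key_sketch(struct btrfs_root *root,
					       struct btrfs_path *path,
					       struct btrfs_key *next_key)
{
	/* level 0, no cache_only filter, no minimum transid */
	return btrfs_find_next_key(root, path, next_key, 0, 0, 0);
}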
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
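
/*
 * Illustrative sketch, not part of the original file: the classic
 * whole-tree scan built from btrfs_search_slot() plus btrfs_next_leaf().
 * process_item() is a hypothetical callback that returns 0 to continue
 * or a negative errno to stop.
 */
static int __maybe_unused walk_all_items_sketch(struct btrfs_root *root,
						int (*process_item)(struct extent_buffer *leaf,
								    int slot))
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* the all-zero key sorts before everything in the tree */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		ret = process_item(leaf, path->slots[0]);
		if (ret)
			break;
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}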
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
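
/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * a search just past (objectid, type) with btrfs_previous_item() to land
 * on the closest matching item at or below that position.
 */
static int __maybe_unused find_prev_item_sketch(struct btrfs_root *root,
						u64 objectid, int type)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* offset (u64)-1 sorts after every item with this objectid/type */
	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	ret = btrfs_previous_item(root, path, objectid, type);
out:
	btrfs_free_path(path);
	return ret;
}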