2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle
*trans
, struct btrfs_root
29 *root
, struct btrfs_path
*path
, int level
);
30 static int split_leaf(struct btrfs_trans_handle
*trans
, struct btrfs_root
31 *root
, struct btrfs_key
*ins_key
,
32 struct btrfs_path
*path
, int data_size
, int extend
);
33 static int push_node_left(struct btrfs_trans_handle
*trans
,
34 struct btrfs_root
*root
, struct extent_buffer
*dst
,
35 struct extent_buffer
*src
, int empty
);
36 static int balance_node_right(struct btrfs_trans_handle
*trans
,
37 struct btrfs_root
*root
,
38 struct extent_buffer
*dst_buf
,
39 struct extent_buffer
*src_buf
);
40 static void del_ptr(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
41 struct btrfs_path
*path
, int level
, int slot
);
43 struct btrfs_path
*btrfs_alloc_path(void)
45 struct btrfs_path
*path
;
46 path
= kmem_cache_zalloc(btrfs_path_cachep
, GFP_NOFS
);
51 * set all locked nodes in the path to blocking locks. This should
52 * be done before scheduling
54 noinline
void btrfs_set_path_blocking(struct btrfs_path
*p
)
57 for (i
= 0; i
< BTRFS_MAX_LEVEL
; i
++) {
58 if (!p
->nodes
[i
] || !p
->locks
[i
])
60 btrfs_set_lock_blocking_rw(p
->nodes
[i
], p
->locks
[i
]);
61 if (p
->locks
[i
] == BTRFS_READ_LOCK
)
62 p
->locks
[i
] = BTRFS_READ_LOCK_BLOCKING
;
63 else if (p
->locks
[i
] == BTRFS_WRITE_LOCK
)
64 p
->locks
[i
] = BTRFS_WRITE_LOCK_BLOCKING
;
69 * reset all the locked nodes in the patch to spinning locks.
71 * held is used to keep lockdep happy, when lockdep is enabled
72 * we set held to a blocking lock before we go around and
73 * retake all the spinlocks in the path. You can safely use NULL
76 noinline
void btrfs_clear_path_blocking(struct btrfs_path
*p
,
77 struct extent_buffer
*held
, int held_rw
)
81 #ifdef CONFIG_DEBUG_LOCK_ALLOC
82 /* lockdep really cares that we take all of these spinlocks
83 * in the right order. If any of the locks in the path are not
84 * currently blocking, it is going to complain. So, make really
85 * really sure by forcing the path to blocking before we clear
89 btrfs_set_lock_blocking_rw(held
, held_rw
);
90 if (held_rw
== BTRFS_WRITE_LOCK
)
91 held_rw
= BTRFS_WRITE_LOCK_BLOCKING
;
92 else if (held_rw
== BTRFS_READ_LOCK
)
93 held_rw
= BTRFS_READ_LOCK_BLOCKING
;
95 btrfs_set_path_blocking(p
);
98 for (i
= BTRFS_MAX_LEVEL
- 1; i
>= 0; i
--) {
99 if (p
->nodes
[i
] && p
->locks
[i
]) {
100 btrfs_clear_lock_blocking_rw(p
->nodes
[i
], p
->locks
[i
]);
101 if (p
->locks
[i
] == BTRFS_WRITE_LOCK_BLOCKING
)
102 p
->locks
[i
] = BTRFS_WRITE_LOCK
;
103 else if (p
->locks
[i
] == BTRFS_READ_LOCK_BLOCKING
)
104 p
->locks
[i
] = BTRFS_READ_LOCK
;
108 #ifdef CONFIG_DEBUG_LOCK_ALLOC
110 btrfs_clear_lock_blocking_rw(held
, held_rw
);
114 /* this also releases the path */
115 void btrfs_free_path(struct btrfs_path
*p
)
119 btrfs_release_path(p
);
120 kmem_cache_free(btrfs_path_cachep
, p
);
124 * path release drops references on the extent buffers in the path
125 * and it drops any locks held by this path
127 * It is safe to call this on paths that no locks or extent buffers held.
129 noinline
void btrfs_release_path(struct btrfs_path
*p
)
133 for (i
= 0; i
< BTRFS_MAX_LEVEL
; i
++) {
138 btrfs_tree_unlock_rw(p
->nodes
[i
], p
->locks
[i
]);
141 free_extent_buffer(p
->nodes
[i
]);
147 * safely gets a reference on the root node of a tree. A lock
148 * is not taken, so a concurrent writer may put a different node
149 * at the root of the tree. See btrfs_lock_root_node for the
152 * The extent buffer returned by this has a reference taken, so
153 * it won't disappear. It may stop being the root of the tree
154 * at any time because there are no locks held.
156 struct extent_buffer
*btrfs_root_node(struct btrfs_root
*root
)
158 struct extent_buffer
*eb
;
162 eb
= rcu_dereference(root
->node
);
165 * RCU really hurts here, we could free up the root node because
166 * it was cow'ed but we may not get the new root node yet so do
167 * the inc_not_zero dance and if it doesn't work then
168 * synchronize_rcu and try again.
170 if (atomic_inc_not_zero(&eb
->refs
)) {
180 /* loop around taking references on and locking the root node of the
181 * tree until you end up with a lock on the root. A locked buffer
182 * is returned, with a reference held.
184 struct extent_buffer
*btrfs_lock_root_node(struct btrfs_root
*root
)
186 struct extent_buffer
*eb
;
189 eb
= btrfs_root_node(root
);
191 if (eb
== root
->node
)
193 btrfs_tree_unlock(eb
);
194 free_extent_buffer(eb
);
199 /* loop around taking references on and locking the root node of the
200 * tree until you end up with a lock on the root. A locked buffer
201 * is returned, with a reference held.
203 struct extent_buffer
*btrfs_read_lock_root_node(struct btrfs_root
*root
)
205 struct extent_buffer
*eb
;
208 eb
= btrfs_root_node(root
);
209 btrfs_tree_read_lock(eb
);
210 if (eb
== root
->node
)
212 btrfs_tree_read_unlock(eb
);
213 free_extent_buffer(eb
);
218 /* cowonly root (everything not a reference counted cow subvolume), just get
219 * put onto a simple dirty list. transaction.c walks this to make sure they
220 * get properly updated on disk.
222 static void add_root_to_dirty_list(struct btrfs_root
*root
)
224 spin_lock(&root
->fs_info
->trans_lock
);
225 if (root
->track_dirty
&& list_empty(&root
->dirty_list
)) {
226 list_add(&root
->dirty_list
,
227 &root
->fs_info
->dirty_cowonly_roots
);
229 spin_unlock(&root
->fs_info
->trans_lock
);
233 * used by snapshot creation to make a copy of a root for a tree with
234 * a given objectid. The buffer with the new root node is returned in
235 * cow_ret, and this func returns zero on success or a negative error code.
237 int btrfs_copy_root(struct btrfs_trans_handle
*trans
,
238 struct btrfs_root
*root
,
239 struct extent_buffer
*buf
,
240 struct extent_buffer
**cow_ret
, u64 new_root_objectid
)
242 struct extent_buffer
*cow
;
245 struct btrfs_disk_key disk_key
;
247 WARN_ON(root
->ref_cows
&& trans
->transid
!=
248 root
->fs_info
->running_transaction
->transid
);
249 WARN_ON(root
->ref_cows
&& trans
->transid
!= root
->last_trans
);
251 level
= btrfs_header_level(buf
);
253 btrfs_item_key(buf
, &disk_key
, 0);
255 btrfs_node_key(buf
, &disk_key
, 0);
257 cow
= btrfs_alloc_free_block(trans
, root
, buf
->len
, 0,
258 new_root_objectid
, &disk_key
, level
,
263 copy_extent_buffer(cow
, buf
, 0, 0, cow
->len
);
264 btrfs_set_header_bytenr(cow
, cow
->start
);
265 btrfs_set_header_generation(cow
, trans
->transid
);
266 btrfs_set_header_backref_rev(cow
, BTRFS_MIXED_BACKREF_REV
);
267 btrfs_clear_header_flag(cow
, BTRFS_HEADER_FLAG_WRITTEN
|
268 BTRFS_HEADER_FLAG_RELOC
);
269 if (new_root_objectid
== BTRFS_TREE_RELOC_OBJECTID
)
270 btrfs_set_header_flag(cow
, BTRFS_HEADER_FLAG_RELOC
);
272 btrfs_set_header_owner(cow
, new_root_objectid
);
274 write_extent_buffer(cow
, root
->fs_info
->fsid
,
275 (unsigned long)btrfs_header_fsid(cow
),
278 WARN_ON(btrfs_header_generation(buf
) > trans
->transid
);
279 if (new_root_objectid
== BTRFS_TREE_RELOC_OBJECTID
)
280 ret
= btrfs_inc_ref(trans
, root
, cow
, 1, 1);
282 ret
= btrfs_inc_ref(trans
, root
, cow
, 0, 1);
287 btrfs_mark_buffer_dirty(cow
);
296 MOD_LOG_KEY_REMOVE_WHILE_FREEING
,
297 MOD_LOG_KEY_REMOVE_WHILE_MOVING
,
299 MOD_LOG_ROOT_REPLACE
,
302 struct tree_mod_move
{
307 struct tree_mod_root
{
312 struct tree_mod_elem
{
314 u64 index
; /* shifted logical */
315 struct seq_list elem
;
318 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
321 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
324 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
325 struct btrfs_disk_key key
;
328 /* this is used for op == MOD_LOG_MOVE_KEYS */
329 struct tree_mod_move move
;
331 /* this is used for op == MOD_LOG_ROOT_REPLACE */
332 struct tree_mod_root old_root
;
336 __get_tree_mod_seq(struct btrfs_fs_info
*fs_info
, struct seq_list
*elem
)
338 elem
->seq
= atomic_inc_return(&fs_info
->tree_mod_seq
);
339 list_add_tail(&elem
->list
, &fs_info
->tree_mod_seq_list
);
342 void btrfs_get_tree_mod_seq(struct btrfs_fs_info
*fs_info
,
343 struct seq_list
*elem
)
346 spin_lock(&fs_info
->tree_mod_seq_lock
);
347 __get_tree_mod_seq(fs_info
, elem
);
348 spin_unlock(&fs_info
->tree_mod_seq_lock
);
351 void btrfs_put_tree_mod_seq(struct btrfs_fs_info
*fs_info
,
352 struct seq_list
*elem
)
354 struct rb_root
*tm_root
;
355 struct rb_node
*node
;
356 struct rb_node
*next
;
357 struct seq_list
*cur_elem
;
358 struct tree_mod_elem
*tm
;
359 u64 min_seq
= (u64
)-1;
360 u64 seq_putting
= elem
->seq
;
365 BUG_ON(!(elem
->flags
& 1));
366 spin_lock(&fs_info
->tree_mod_seq_lock
);
367 list_del(&elem
->list
);
369 list_for_each_entry(cur_elem
, &fs_info
->tree_mod_seq_list
, list
) {
370 if ((cur_elem
->flags
& 1) && cur_elem
->seq
< min_seq
) {
371 if (seq_putting
> cur_elem
->seq
) {
373 * blocker with lower sequence number exists, we
374 * cannot remove anything from the log
378 min_seq
= cur_elem
->seq
;
383 * anything that's lower than the lowest existing (read: blocked)
384 * sequence number can be removed from the tree.
386 write_lock(&fs_info
->tree_mod_log_lock
);
387 tm_root
= &fs_info
->tree_mod_log
;
388 for (node
= rb_first(tm_root
); node
; node
= next
) {
389 next
= rb_next(node
);
390 tm
= container_of(node
, struct tree_mod_elem
, node
);
391 if (tm
->elem
.seq
> min_seq
)
393 rb_erase(node
, tm_root
);
394 list_del(&tm
->elem
.list
);
397 write_unlock(&fs_info
->tree_mod_log_lock
);
399 spin_unlock(&fs_info
->tree_mod_seq_lock
);
403 * key order of the log:
406 * the index is the shifted logical of the *new* root node for root replace
407 * operations, or the shifted logical of the affected block for all other
411 __tree_mod_log_insert(struct btrfs_fs_info
*fs_info
, struct tree_mod_elem
*tm
)
413 struct rb_root
*tm_root
;
414 struct rb_node
**new;
415 struct rb_node
*parent
= NULL
;
416 struct tree_mod_elem
*cur
;
419 BUG_ON(!tm
|| !tm
->elem
.seq
);
421 write_lock(&fs_info
->tree_mod_log_lock
);
422 tm_root
= &fs_info
->tree_mod_log
;
423 new = &tm_root
->rb_node
;
425 cur
= container_of(*new, struct tree_mod_elem
, node
);
427 if (cur
->index
< tm
->index
)
428 new = &((*new)->rb_left
);
429 else if (cur
->index
> tm
->index
)
430 new = &((*new)->rb_right
);
431 else if (cur
->elem
.seq
< tm
->elem
.seq
)
432 new = &((*new)->rb_left
);
433 else if (cur
->elem
.seq
> tm
->elem
.seq
)
434 new = &((*new)->rb_right
);
442 rb_link_node(&tm
->node
, parent
, new);
443 rb_insert_color(&tm
->node
, tm_root
);
445 write_unlock(&fs_info
->tree_mod_log_lock
);
449 int tree_mod_alloc(struct btrfs_fs_info
*fs_info
, gfp_t flags
,
450 struct tree_mod_elem
**tm_ret
)
452 struct tree_mod_elem
*tm
;
456 if (list_empty(&fs_info
->tree_mod_seq_list
))
459 tm
= *tm_ret
= kzalloc(sizeof(*tm
), flags
);
463 __get_tree_mod_seq(fs_info
, &tm
->elem
);
471 tree_mod_log_insert_key_mask(struct btrfs_fs_info
*fs_info
,
472 struct extent_buffer
*eb
, int slot
,
473 enum mod_log_op op
, gfp_t flags
)
475 struct tree_mod_elem
*tm
;
478 ret
= tree_mod_alloc(fs_info
, flags
, &tm
);
482 tm
->index
= eb
->start
>> PAGE_CACHE_SHIFT
;
483 if (op
!= MOD_LOG_KEY_ADD
) {
484 btrfs_node_key(eb
, &tm
->key
, slot
);
485 tm
->blockptr
= btrfs_node_blockptr(eb
, slot
);
489 tm
->generation
= btrfs_node_ptr_generation(eb
, slot
);
491 return __tree_mod_log_insert(fs_info
, tm
);
495 tree_mod_log_insert_key(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*eb
,
496 int slot
, enum mod_log_op op
)
498 return tree_mod_log_insert_key_mask(fs_info
, eb
, slot
, op
, GFP_NOFS
);
502 tree_mod_log_insert_move(struct btrfs_fs_info
*fs_info
,
503 struct extent_buffer
*eb
, int dst_slot
, int src_slot
,
504 int nr_items
, gfp_t flags
)
506 struct tree_mod_elem
*tm
;
510 ret
= tree_mod_alloc(fs_info
, flags
, &tm
);
514 for (i
= 0; i
+ dst_slot
< src_slot
&& i
< nr_items
; i
++) {
515 ret
= tree_mod_log_insert_key(fs_info
, eb
, i
+ dst_slot
,
516 MOD_LOG_KEY_REMOVE_WHILE_MOVING
);
520 tm
->index
= eb
->start
>> PAGE_CACHE_SHIFT
;
522 tm
->move
.dst_slot
= dst_slot
;
523 tm
->move
.nr_items
= nr_items
;
524 tm
->op
= MOD_LOG_MOVE_KEYS
;
526 return __tree_mod_log_insert(fs_info
, tm
);
530 tree_mod_log_insert_root(struct btrfs_fs_info
*fs_info
,
531 struct extent_buffer
*old_root
,
532 struct extent_buffer
*new_root
, gfp_t flags
)
534 struct tree_mod_elem
*tm
;
537 ret
= tree_mod_alloc(fs_info
, flags
, &tm
);
541 tm
->index
= new_root
->start
>> PAGE_CACHE_SHIFT
;
542 tm
->old_root
.logical
= old_root
->start
;
543 tm
->old_root
.level
= btrfs_header_level(old_root
);
544 tm
->generation
= btrfs_header_generation(old_root
);
545 tm
->op
= MOD_LOG_ROOT_REPLACE
;
547 return __tree_mod_log_insert(fs_info
, tm
);
550 static struct tree_mod_elem
*
551 __tree_mod_log_search(struct btrfs_fs_info
*fs_info
, u64 start
, u64 min_seq
,
554 struct rb_root
*tm_root
;
555 struct rb_node
*node
;
556 struct tree_mod_elem
*cur
= NULL
;
557 struct tree_mod_elem
*found
= NULL
;
558 u64 index
= start
>> PAGE_CACHE_SHIFT
;
560 read_lock(&fs_info
->tree_mod_log_lock
);
561 tm_root
= &fs_info
->tree_mod_log
;
562 node
= tm_root
->rb_node
;
564 cur
= container_of(node
, struct tree_mod_elem
, node
);
565 if (cur
->index
< index
) {
566 node
= node
->rb_left
;
567 } else if (cur
->index
> index
) {
568 node
= node
->rb_right
;
569 } else if (cur
->elem
.seq
< min_seq
) {
570 node
= node
->rb_left
;
571 } else if (!smallest
) {
572 /* we want the node with the highest seq */
574 BUG_ON(found
->elem
.seq
> cur
->elem
.seq
);
576 node
= node
->rb_left
;
577 } else if (cur
->elem
.seq
> min_seq
) {
578 /* we want the node with the smallest seq */
580 BUG_ON(found
->elem
.seq
< cur
->elem
.seq
);
582 node
= node
->rb_right
;
588 read_unlock(&fs_info
->tree_mod_log_lock
);
594 * this returns the element from the log with the smallest time sequence
595 * value that's in the log (the oldest log item). any element with a time
596 * sequence lower than min_seq will be ignored.
598 static struct tree_mod_elem
*
599 tree_mod_log_search_oldest(struct btrfs_fs_info
*fs_info
, u64 start
,
602 return __tree_mod_log_search(fs_info
, start
, min_seq
, 1);
606 * this returns the element from the log with the largest time sequence
607 * value that's in the log (the most recent log item). any element with
608 * a time sequence lower than min_seq will be ignored.
610 static struct tree_mod_elem
*
611 tree_mod_log_search(struct btrfs_fs_info
*fs_info
, u64 start
, u64 min_seq
)
613 return __tree_mod_log_search(fs_info
, start
, min_seq
, 0);
617 tree_mod_log_eb_copy(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*dst
,
618 struct extent_buffer
*src
, unsigned long dst_offset
,
619 unsigned long src_offset
, int nr_items
)
625 if (list_empty(&fs_info
->tree_mod_seq_list
))
628 if (btrfs_header_level(dst
) == 0 && btrfs_header_level(src
) == 0)
631 /* speed this up by single seq for all operations? */
632 for (i
= 0; i
< nr_items
; i
++) {
633 ret
= tree_mod_log_insert_key(fs_info
, src
, i
+ src_offset
,
636 ret
= tree_mod_log_insert_key(fs_info
, dst
, i
+ dst_offset
,
643 tree_mod_log_eb_move(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*dst
,
644 int dst_offset
, int src_offset
, int nr_items
)
647 ret
= tree_mod_log_insert_move(fs_info
, dst
, dst_offset
, src_offset
,
653 tree_mod_log_set_node_key(struct btrfs_fs_info
*fs_info
,
654 struct extent_buffer
*eb
,
655 struct btrfs_disk_key
*disk_key
, int slot
, int atomic
)
659 ret
= tree_mod_log_insert_key_mask(fs_info
, eb
, slot
,
661 atomic
? GFP_ATOMIC
: GFP_NOFS
);
665 static void tree_mod_log_free_eb(struct btrfs_fs_info
*fs_info
,
666 struct extent_buffer
*eb
)
673 if (list_empty(&fs_info
->tree_mod_seq_list
))
676 if (btrfs_header_level(eb
) == 0)
679 nritems
= btrfs_header_nritems(eb
);
680 for (i
= nritems
- 1; i
>= 0; i
--) {
681 ret
= tree_mod_log_insert_key(fs_info
, eb
, i
,
682 MOD_LOG_KEY_REMOVE_WHILE_FREEING
);
688 tree_mod_log_set_root_pointer(struct btrfs_root
*root
,
689 struct extent_buffer
*new_root_node
)
692 tree_mod_log_free_eb(root
->fs_info
, root
->node
);
693 ret
= tree_mod_log_insert_root(root
->fs_info
, root
->node
,
694 new_root_node
, GFP_NOFS
);
699 * check if the tree block can be shared by multiple trees
701 int btrfs_block_can_be_shared(struct btrfs_root
*root
,
702 struct extent_buffer
*buf
)
705 * Tree blocks not in refernece counted trees and tree roots
706 * are never shared. If a block was allocated after the last
707 * snapshot and the block was not allocated by tree relocation,
708 * we know the block is not shared.
710 if (root
->ref_cows
&&
711 buf
!= root
->node
&& buf
!= root
->commit_root
&&
712 (btrfs_header_generation(buf
) <=
713 btrfs_root_last_snapshot(&root
->root_item
) ||
714 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)))
716 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
717 if (root
->ref_cows
&&
718 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
724 static noinline
int update_ref_for_cow(struct btrfs_trans_handle
*trans
,
725 struct btrfs_root
*root
,
726 struct extent_buffer
*buf
,
727 struct extent_buffer
*cow
,
737 * Backrefs update rules:
739 * Always use full backrefs for extent pointers in tree block
740 * allocated by tree relocation.
742 * If a shared tree block is no longer referenced by its owner
743 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
744 * use full backrefs for extent pointers in tree block.
746 * If a tree block is been relocating
747 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
748 * use full backrefs for extent pointers in tree block.
749 * The reason for this is some operations (such as drop tree)
750 * are only allowed for blocks use full backrefs.
753 if (btrfs_block_can_be_shared(root
, buf
)) {
754 ret
= btrfs_lookup_extent_info(trans
, root
, buf
->start
,
755 buf
->len
, &refs
, &flags
);
760 btrfs_std_error(root
->fs_info
, ret
);
765 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
766 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
767 flags
= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
772 owner
= btrfs_header_owner(buf
);
773 BUG_ON(owner
== BTRFS_TREE_RELOC_OBJECTID
&&
774 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
));
777 if ((owner
== root
->root_key
.objectid
||
778 root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) &&
779 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
)) {
780 ret
= btrfs_inc_ref(trans
, root
, buf
, 1, 1);
781 BUG_ON(ret
); /* -ENOMEM */
783 if (root
->root_key
.objectid
==
784 BTRFS_TREE_RELOC_OBJECTID
) {
785 ret
= btrfs_dec_ref(trans
, root
, buf
, 0, 1);
786 BUG_ON(ret
); /* -ENOMEM */
787 ret
= btrfs_inc_ref(trans
, root
, cow
, 1, 1);
788 BUG_ON(ret
); /* -ENOMEM */
790 new_flags
|= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
793 if (root
->root_key
.objectid
==
794 BTRFS_TREE_RELOC_OBJECTID
)
795 ret
= btrfs_inc_ref(trans
, root
, cow
, 1, 1);
797 ret
= btrfs_inc_ref(trans
, root
, cow
, 0, 1);
798 BUG_ON(ret
); /* -ENOMEM */
800 if (new_flags
!= 0) {
801 ret
= btrfs_set_disk_extent_flags(trans
, root
,
809 if (flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
) {
810 if (root
->root_key
.objectid
==
811 BTRFS_TREE_RELOC_OBJECTID
)
812 ret
= btrfs_inc_ref(trans
, root
, cow
, 1, 1);
814 ret
= btrfs_inc_ref(trans
, root
, cow
, 0, 1);
815 BUG_ON(ret
); /* -ENOMEM */
816 ret
= btrfs_dec_ref(trans
, root
, buf
, 1, 1);
817 BUG_ON(ret
); /* -ENOMEM */
819 clean_tree_block(trans
, root
, buf
);
826 * does the dirty work in cow of a single block. The parent block (if
827 * supplied) is updated to point to the new cow copy. The new buffer is marked
828 * dirty and returned locked. If you modify the block it needs to be marked
831 * search_start -- an allocation hint for the new block
833 * empty_size -- a hint that you plan on doing more cow. This is the size in
834 * bytes the allocator should try to find free next to the block it returns.
835 * This is just a hint and may be ignored by the allocator.
837 static noinline
int __btrfs_cow_block(struct btrfs_trans_handle
*trans
,
838 struct btrfs_root
*root
,
839 struct extent_buffer
*buf
,
840 struct extent_buffer
*parent
, int parent_slot
,
841 struct extent_buffer
**cow_ret
,
842 u64 search_start
, u64 empty_size
)
844 struct btrfs_disk_key disk_key
;
845 struct extent_buffer
*cow
;
854 btrfs_assert_tree_locked(buf
);
856 WARN_ON(root
->ref_cows
&& trans
->transid
!=
857 root
->fs_info
->running_transaction
->transid
);
858 WARN_ON(root
->ref_cows
&& trans
->transid
!= root
->last_trans
);
860 level
= btrfs_header_level(buf
);
863 btrfs_item_key(buf
, &disk_key
, 0);
865 btrfs_node_key(buf
, &disk_key
, 0);
867 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) {
869 parent_start
= parent
->start
;
875 cow
= btrfs_alloc_free_block(trans
, root
, buf
->len
, parent_start
,
876 root
->root_key
.objectid
, &disk_key
,
877 level
, search_start
, empty_size
);
881 /* cow is set to blocking by btrfs_init_new_buffer */
883 copy_extent_buffer(cow
, buf
, 0, 0, cow
->len
);
884 btrfs_set_header_bytenr(cow
, cow
->start
);
885 btrfs_set_header_generation(cow
, trans
->transid
);
886 btrfs_set_header_backref_rev(cow
, BTRFS_MIXED_BACKREF_REV
);
887 btrfs_clear_header_flag(cow
, BTRFS_HEADER_FLAG_WRITTEN
|
888 BTRFS_HEADER_FLAG_RELOC
);
889 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
890 btrfs_set_header_flag(cow
, BTRFS_HEADER_FLAG_RELOC
);
892 btrfs_set_header_owner(cow
, root
->root_key
.objectid
);
894 write_extent_buffer(cow
, root
->fs_info
->fsid
,
895 (unsigned long)btrfs_header_fsid(cow
),
898 ret
= update_ref_for_cow(trans
, root
, buf
, cow
, &last_ref
);
900 btrfs_abort_transaction(trans
, root
, ret
);
905 btrfs_reloc_cow_block(trans
, root
, buf
, cow
);
907 if (buf
== root
->node
) {
908 WARN_ON(parent
&& parent
!= buf
);
909 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
910 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
911 parent_start
= buf
->start
;
915 extent_buffer_get(cow
);
916 rcu_assign_pointer(root
->node
, cow
);
918 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
920 free_extent_buffer(buf
);
921 add_root_to_dirty_list(root
);
923 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
924 parent_start
= parent
->start
;
928 WARN_ON(trans
->transid
!= btrfs_header_generation(parent
));
929 btrfs_set_node_blockptr(parent
, parent_slot
,
931 btrfs_set_node_ptr_generation(parent
, parent_slot
,
933 btrfs_mark_buffer_dirty(parent
);
934 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
938 btrfs_tree_unlock(buf
);
939 free_extent_buffer_stale(buf
);
940 btrfs_mark_buffer_dirty(cow
);
945 static inline int should_cow_block(struct btrfs_trans_handle
*trans
,
946 struct btrfs_root
*root
,
947 struct extent_buffer
*buf
)
949 /* ensure we can see the force_cow */
953 * We do not need to cow a block if
954 * 1) this block is not created or changed in this transaction;
955 * 2) this block does not belong to TREE_RELOC tree;
956 * 3) the root is not forced COW.
958 * What is forced COW:
959 * when we create snapshot during commiting the transaction,
960 * after we've finished coping src root, we must COW the shared
961 * block to ensure the metadata consistency.
963 if (btrfs_header_generation(buf
) == trans
->transid
&&
964 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
) &&
965 !(root
->root_key
.objectid
!= BTRFS_TREE_RELOC_OBJECTID
&&
966 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)) &&
973 * cows a single block, see __btrfs_cow_block for the real work.
974 * This version of it has extra checks so that a block isn't cow'd more than
975 * once per transaction, as long as it hasn't been written yet
977 noinline
int btrfs_cow_block(struct btrfs_trans_handle
*trans
,
978 struct btrfs_root
*root
, struct extent_buffer
*buf
,
979 struct extent_buffer
*parent
, int parent_slot
,
980 struct extent_buffer
**cow_ret
)
985 if (trans
->transaction
!= root
->fs_info
->running_transaction
) {
986 printk(KERN_CRIT
"trans %llu running %llu\n",
987 (unsigned long long)trans
->transid
,
989 root
->fs_info
->running_transaction
->transid
);
992 if (trans
->transid
!= root
->fs_info
->generation
) {
993 printk(KERN_CRIT
"trans %llu running %llu\n",
994 (unsigned long long)trans
->transid
,
995 (unsigned long long)root
->fs_info
->generation
);
999 if (!should_cow_block(trans
, root
, buf
)) {
1004 search_start
= buf
->start
& ~((u64
)(1024 * 1024 * 1024) - 1);
1007 btrfs_set_lock_blocking(parent
);
1008 btrfs_set_lock_blocking(buf
);
1010 ret
= __btrfs_cow_block(trans
, root
, buf
, parent
,
1011 parent_slot
, cow_ret
, search_start
, 0);
1013 trace_btrfs_cow_block(root
, buf
, *cow_ret
);
1019 * helper function for defrag to decide if two blocks pointed to by a
1020 * node are actually close by
1022 static int close_blocks(u64 blocknr
, u64 other
, u32 blocksize
)
1024 if (blocknr
< other
&& other
- (blocknr
+ blocksize
) < 32768)
1026 if (blocknr
> other
&& blocknr
- (other
+ blocksize
) < 32768)
1032 * compare two keys in a memcmp fashion
1034 static int comp_keys(struct btrfs_disk_key
*disk
, struct btrfs_key
*k2
)
1036 struct btrfs_key k1
;
1038 btrfs_disk_key_to_cpu(&k1
, disk
);
1040 return btrfs_comp_cpu_keys(&k1
, k2
);
1044 * same as comp_keys only with two btrfs_key's
1046 int btrfs_comp_cpu_keys(struct btrfs_key
*k1
, struct btrfs_key
*k2
)
1048 if (k1
->objectid
> k2
->objectid
)
1050 if (k1
->objectid
< k2
->objectid
)
1052 if (k1
->type
> k2
->type
)
1054 if (k1
->type
< k2
->type
)
1056 if (k1
->offset
> k2
->offset
)
1058 if (k1
->offset
< k2
->offset
)
1064 * this is used by the defrag code to go through all the
1065 * leaves pointed to by a node and reallocate them so that
1066 * disk order is close to key order
1068 int btrfs_realloc_node(struct btrfs_trans_handle
*trans
,
1069 struct btrfs_root
*root
, struct extent_buffer
*parent
,
1070 int start_slot
, int cache_only
, u64
*last_ret
,
1071 struct btrfs_key
*progress
)
1073 struct extent_buffer
*cur
;
1076 u64 search_start
= *last_ret
;
1086 int progress_passed
= 0;
1087 struct btrfs_disk_key disk_key
;
1089 parent_level
= btrfs_header_level(parent
);
1090 if (cache_only
&& parent_level
!= 1)
1093 if (trans
->transaction
!= root
->fs_info
->running_transaction
)
1095 if (trans
->transid
!= root
->fs_info
->generation
)
1098 parent_nritems
= btrfs_header_nritems(parent
);
1099 blocksize
= btrfs_level_size(root
, parent_level
- 1);
1100 end_slot
= parent_nritems
;
1102 if (parent_nritems
== 1)
1105 btrfs_set_lock_blocking(parent
);
1107 for (i
= start_slot
; i
< end_slot
; i
++) {
1110 btrfs_node_key(parent
, &disk_key
, i
);
1111 if (!progress_passed
&& comp_keys(&disk_key
, progress
) < 0)
1114 progress_passed
= 1;
1115 blocknr
= btrfs_node_blockptr(parent
, i
);
1116 gen
= btrfs_node_ptr_generation(parent
, i
);
1117 if (last_block
== 0)
1118 last_block
= blocknr
;
1121 other
= btrfs_node_blockptr(parent
, i
- 1);
1122 close
= close_blocks(blocknr
, other
, blocksize
);
1124 if (!close
&& i
< end_slot
- 2) {
1125 other
= btrfs_node_blockptr(parent
, i
+ 1);
1126 close
= close_blocks(blocknr
, other
, blocksize
);
1129 last_block
= blocknr
;
1133 cur
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
1135 uptodate
= btrfs_buffer_uptodate(cur
, gen
, 0);
1138 if (!cur
|| !uptodate
) {
1140 free_extent_buffer(cur
);
1144 cur
= read_tree_block(root
, blocknr
,
1148 } else if (!uptodate
) {
1149 btrfs_read_buffer(cur
, gen
);
1152 if (search_start
== 0)
1153 search_start
= last_block
;
1155 btrfs_tree_lock(cur
);
1156 btrfs_set_lock_blocking(cur
);
1157 err
= __btrfs_cow_block(trans
, root
, cur
, parent
, i
,
1160 (end_slot
- i
) * blocksize
));
1162 btrfs_tree_unlock(cur
);
1163 free_extent_buffer(cur
);
1166 search_start
= cur
->start
;
1167 last_block
= cur
->start
;
1168 *last_ret
= search_start
;
1169 btrfs_tree_unlock(cur
);
1170 free_extent_buffer(cur
);
1176 * The leaf data grows from end-to-front in the node.
1177 * this returns the address of the start of the last item,
1178 * which is the stop of the leaf data stack
1180 static inline unsigned int leaf_data_end(struct btrfs_root
*root
,
1181 struct extent_buffer
*leaf
)
1183 u32 nr
= btrfs_header_nritems(leaf
);
1185 return BTRFS_LEAF_DATA_SIZE(root
);
1186 return btrfs_item_offset_nr(leaf
, nr
- 1);
1191 * search for key in the extent_buffer. The items start at offset p,
1192 * and they are item_size apart. There are 'max' items in p.
1194 * the slot in the array is returned via slot, and it points to
1195 * the place where you would insert key if it is not found in
1198 * slot may point to max if the key is bigger than all of the keys
1200 static noinline
int generic_bin_search(struct extent_buffer
*eb
,
1202 int item_size
, struct btrfs_key
*key
,
1209 struct btrfs_disk_key
*tmp
= NULL
;
1210 struct btrfs_disk_key unaligned
;
1211 unsigned long offset
;
1213 unsigned long map_start
= 0;
1214 unsigned long map_len
= 0;
1217 while (low
< high
) {
1218 mid
= (low
+ high
) / 2;
1219 offset
= p
+ mid
* item_size
;
1221 if (!kaddr
|| offset
< map_start
||
1222 (offset
+ sizeof(struct btrfs_disk_key
)) >
1223 map_start
+ map_len
) {
1225 err
= map_private_extent_buffer(eb
, offset
,
1226 sizeof(struct btrfs_disk_key
),
1227 &kaddr
, &map_start
, &map_len
);
1230 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1233 read_extent_buffer(eb
, &unaligned
,
1234 offset
, sizeof(unaligned
));
1239 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1242 ret
= comp_keys(tmp
, key
);
1258 * simple bin_search frontend that does the right thing for
1261 static int bin_search(struct extent_buffer
*eb
, struct btrfs_key
*key
,
1262 int level
, int *slot
)
1265 return generic_bin_search(eb
,
1266 offsetof(struct btrfs_leaf
, items
),
1267 sizeof(struct btrfs_item
),
1268 key
, btrfs_header_nritems(eb
),
1271 return generic_bin_search(eb
,
1272 offsetof(struct btrfs_node
, ptrs
),
1273 sizeof(struct btrfs_key_ptr
),
1274 key
, btrfs_header_nritems(eb
),
1280 int btrfs_bin_search(struct extent_buffer
*eb
, struct btrfs_key
*key
,
1281 int level
, int *slot
)
1283 return bin_search(eb
, key
, level
, slot
);
1286 static void root_add_used(struct btrfs_root
*root
, u32 size
)
1288 spin_lock(&root
->accounting_lock
);
1289 btrfs_set_root_used(&root
->root_item
,
1290 btrfs_root_used(&root
->root_item
) + size
);
1291 spin_unlock(&root
->accounting_lock
);
1294 static void root_sub_used(struct btrfs_root
*root
, u32 size
)
1296 spin_lock(&root
->accounting_lock
);
1297 btrfs_set_root_used(&root
->root_item
,
1298 btrfs_root_used(&root
->root_item
) - size
);
1299 spin_unlock(&root
->accounting_lock
);
1302 /* given a node and slot number, this reads the blocks it points to. The
1303 * extent buffer is returned with a reference taken (but unlocked).
1304 * NULL is returned on error.
1306 static noinline
struct extent_buffer
*read_node_slot(struct btrfs_root
*root
,
1307 struct extent_buffer
*parent
, int slot
)
1309 int level
= btrfs_header_level(parent
);
1312 if (slot
>= btrfs_header_nritems(parent
))
1317 return read_tree_block(root
, btrfs_node_blockptr(parent
, slot
),
1318 btrfs_level_size(root
, level
- 1),
1319 btrfs_node_ptr_generation(parent
, slot
));
1323 * node level balancing, used to make sure nodes are in proper order for
1324 * item deletion. We balance from the top down, so we have to make sure
1325 * that a deletion won't leave an node completely empty later on.
1327 static noinline
int balance_level(struct btrfs_trans_handle
*trans
,
1328 struct btrfs_root
*root
,
1329 struct btrfs_path
*path
, int level
)
1331 struct extent_buffer
*right
= NULL
;
1332 struct extent_buffer
*mid
;
1333 struct extent_buffer
*left
= NULL
;
1334 struct extent_buffer
*parent
= NULL
;
1338 int orig_slot
= path
->slots
[level
];
1344 mid
= path
->nodes
[level
];
1346 WARN_ON(path
->locks
[level
] != BTRFS_WRITE_LOCK
&&
1347 path
->locks
[level
] != BTRFS_WRITE_LOCK_BLOCKING
);
1348 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1350 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
1352 if (level
< BTRFS_MAX_LEVEL
- 1) {
1353 parent
= path
->nodes
[level
+ 1];
1354 pslot
= path
->slots
[level
+ 1];
1358 * deal with the case where there is only one pointer in the root
1359 * by promoting the node below to a root
1362 struct extent_buffer
*child
;
1364 if (btrfs_header_nritems(mid
) != 1)
1367 /* promote the child to a root */
1368 child
= read_node_slot(root
, mid
, 0);
1371 btrfs_std_error(root
->fs_info
, ret
);
1375 btrfs_tree_lock(child
);
1376 btrfs_set_lock_blocking(child
);
1377 ret
= btrfs_cow_block(trans
, root
, child
, mid
, 0, &child
);
1379 btrfs_tree_unlock(child
);
1380 free_extent_buffer(child
);
1384 rcu_assign_pointer(root
->node
, child
);
1386 add_root_to_dirty_list(root
);
1387 btrfs_tree_unlock(child
);
1389 path
->locks
[level
] = 0;
1390 path
->nodes
[level
] = NULL
;
1391 clean_tree_block(trans
, root
, mid
);
1392 btrfs_tree_unlock(mid
);
1393 /* once for the path */
1394 free_extent_buffer(mid
);
1396 root_sub_used(root
, mid
->len
);
1397 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1398 /* once for the root ptr */
1399 free_extent_buffer_stale(mid
);
1402 if (btrfs_header_nritems(mid
) >
1403 BTRFS_NODEPTRS_PER_BLOCK(root
) / 4)
1406 btrfs_header_nritems(mid
);
1408 left
= read_node_slot(root
, parent
, pslot
- 1);
1410 btrfs_tree_lock(left
);
1411 btrfs_set_lock_blocking(left
);
1412 wret
= btrfs_cow_block(trans
, root
, left
,
1413 parent
, pslot
- 1, &left
);
1419 right
= read_node_slot(root
, parent
, pslot
+ 1);
1421 btrfs_tree_lock(right
);
1422 btrfs_set_lock_blocking(right
);
1423 wret
= btrfs_cow_block(trans
, root
, right
,
1424 parent
, pslot
+ 1, &right
);
1431 /* first, try to make some room in the middle buffer */
1433 orig_slot
+= btrfs_header_nritems(left
);
1434 wret
= push_node_left(trans
, root
, left
, mid
, 1);
1437 btrfs_header_nritems(mid
);
1441 * then try to empty the right most buffer into the middle
1444 wret
= push_node_left(trans
, root
, mid
, right
, 1);
1445 if (wret
< 0 && wret
!= -ENOSPC
)
1447 if (btrfs_header_nritems(right
) == 0) {
1448 clean_tree_block(trans
, root
, right
);
1449 btrfs_tree_unlock(right
);
1450 del_ptr(trans
, root
, path
, level
+ 1, pslot
+ 1);
1451 root_sub_used(root
, right
->len
);
1452 btrfs_free_tree_block(trans
, root
, right
, 0, 1);
1453 free_extent_buffer_stale(right
);
1456 struct btrfs_disk_key right_key
;
1457 btrfs_node_key(right
, &right_key
, 0);
1458 btrfs_set_node_key(parent
, &right_key
, pslot
+ 1);
1459 btrfs_mark_buffer_dirty(parent
);
1462 if (btrfs_header_nritems(mid
) == 1) {
1464 * we're not allowed to leave a node with one item in the
1465 * tree during a delete. A deletion from lower in the tree
1466 * could try to delete the only pointer in this node.
1467 * So, pull some keys from the left.
1468 * There has to be a left pointer at this point because
1469 * otherwise we would have pulled some pointers from the
1474 btrfs_std_error(root
->fs_info
, ret
);
1477 wret
= balance_node_right(trans
, root
, mid
, left
);
1483 wret
= push_node_left(trans
, root
, left
, mid
, 1);
1489 if (btrfs_header_nritems(mid
) == 0) {
1490 clean_tree_block(trans
, root
, mid
);
1491 btrfs_tree_unlock(mid
);
1492 del_ptr(trans
, root
, path
, level
+ 1, pslot
);
1493 root_sub_used(root
, mid
->len
);
1494 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1495 free_extent_buffer_stale(mid
);
1498 /* update the parent key to reflect our changes */
1499 struct btrfs_disk_key mid_key
;
1500 btrfs_node_key(mid
, &mid_key
, 0);
1501 btrfs_set_node_key(parent
, &mid_key
, pslot
);
1502 btrfs_mark_buffer_dirty(parent
);
1505 /* update the path */
1507 if (btrfs_header_nritems(left
) > orig_slot
) {
1508 extent_buffer_get(left
);
1509 /* left was locked after cow */
1510 path
->nodes
[level
] = left
;
1511 path
->slots
[level
+ 1] -= 1;
1512 path
->slots
[level
] = orig_slot
;
1514 btrfs_tree_unlock(mid
);
1515 free_extent_buffer(mid
);
1518 orig_slot
-= btrfs_header_nritems(left
);
1519 path
->slots
[level
] = orig_slot
;
1522 /* double check we haven't messed things up */
1524 btrfs_node_blockptr(path
->nodes
[level
], path
->slots
[level
]))
1528 btrfs_tree_unlock(right
);
1529 free_extent_buffer(right
);
1532 if (path
->nodes
[level
] != left
)
1533 btrfs_tree_unlock(left
);
1534 free_extent_buffer(left
);
1539 /* Node balancing for insertion. Here we only split or push nodes around
1540 * when they are completely full. This is also done top down, so we
1541 * have to be pessimistic.
1543 static noinline
int push_nodes_for_insert(struct btrfs_trans_handle
*trans
,
1544 struct btrfs_root
*root
,
1545 struct btrfs_path
*path
, int level
)
1547 struct extent_buffer
*right
= NULL
;
1548 struct extent_buffer
*mid
;
1549 struct extent_buffer
*left
= NULL
;
1550 struct extent_buffer
*parent
= NULL
;
1554 int orig_slot
= path
->slots
[level
];
1559 mid
= path
->nodes
[level
];
1560 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1562 if (level
< BTRFS_MAX_LEVEL
- 1) {
1563 parent
= path
->nodes
[level
+ 1];
1564 pslot
= path
->slots
[level
+ 1];
1570 left
= read_node_slot(root
, parent
, pslot
- 1);
1572 /* first, try to make some room in the middle buffer */
1576 btrfs_tree_lock(left
);
1577 btrfs_set_lock_blocking(left
);
1579 left_nr
= btrfs_header_nritems(left
);
1580 if (left_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
1583 ret
= btrfs_cow_block(trans
, root
, left
, parent
,
1588 wret
= push_node_left(trans
, root
,
1595 struct btrfs_disk_key disk_key
;
1596 orig_slot
+= left_nr
;
1597 btrfs_node_key(mid
, &disk_key
, 0);
1598 btrfs_set_node_key(parent
, &disk_key
, pslot
);
1599 btrfs_mark_buffer_dirty(parent
);
1600 if (btrfs_header_nritems(left
) > orig_slot
) {
1601 path
->nodes
[level
] = left
;
1602 path
->slots
[level
+ 1] -= 1;
1603 path
->slots
[level
] = orig_slot
;
1604 btrfs_tree_unlock(mid
);
1605 free_extent_buffer(mid
);
1608 btrfs_header_nritems(left
);
1609 path
->slots
[level
] = orig_slot
;
1610 btrfs_tree_unlock(left
);
1611 free_extent_buffer(left
);
1615 btrfs_tree_unlock(left
);
1616 free_extent_buffer(left
);
1618 right
= read_node_slot(root
, parent
, pslot
+ 1);
1621 * then try to empty the right most buffer into the middle
1626 btrfs_tree_lock(right
);
1627 btrfs_set_lock_blocking(right
);
1629 right_nr
= btrfs_header_nritems(right
);
1630 if (right_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
1633 ret
= btrfs_cow_block(trans
, root
, right
,
1639 wret
= balance_node_right(trans
, root
,
1646 struct btrfs_disk_key disk_key
;
1648 btrfs_node_key(right
, &disk_key
, 0);
1649 btrfs_set_node_key(parent
, &disk_key
, pslot
+ 1);
1650 btrfs_mark_buffer_dirty(parent
);
1652 if (btrfs_header_nritems(mid
) <= orig_slot
) {
1653 path
->nodes
[level
] = right
;
1654 path
->slots
[level
+ 1] += 1;
1655 path
->slots
[level
] = orig_slot
-
1656 btrfs_header_nritems(mid
);
1657 btrfs_tree_unlock(mid
);
1658 free_extent_buffer(mid
);
1660 btrfs_tree_unlock(right
);
1661 free_extent_buffer(right
);
1665 btrfs_tree_unlock(right
);
1666 free_extent_buffer(right
);
1672 * readahead one full node of leaves, finding things that are close
1673 * to the block in 'slot', and triggering ra on them.
1675 static void reada_for_search(struct btrfs_root
*root
,
1676 struct btrfs_path
*path
,
1677 int level
, int slot
, u64 objectid
)
1679 struct extent_buffer
*node
;
1680 struct btrfs_disk_key disk_key
;
1686 int direction
= path
->reada
;
1687 struct extent_buffer
*eb
;
1695 if (!path
->nodes
[level
])
1698 node
= path
->nodes
[level
];
1700 search
= btrfs_node_blockptr(node
, slot
);
1701 blocksize
= btrfs_level_size(root
, level
- 1);
1702 eb
= btrfs_find_tree_block(root
, search
, blocksize
);
1704 free_extent_buffer(eb
);
1710 nritems
= btrfs_header_nritems(node
);
1714 if (direction
< 0) {
1718 } else if (direction
> 0) {
1723 if (path
->reada
< 0 && objectid
) {
1724 btrfs_node_key(node
, &disk_key
, nr
);
1725 if (btrfs_disk_key_objectid(&disk_key
) != objectid
)
1728 search
= btrfs_node_blockptr(node
, nr
);
1729 if ((search
<= target
&& target
- search
<= 65536) ||
1730 (search
> target
&& search
- target
<= 65536)) {
1731 gen
= btrfs_node_ptr_generation(node
, nr
);
1732 readahead_tree_block(root
, search
, blocksize
, gen
);
1736 if ((nread
> 65536 || nscan
> 32))
1742 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1745 static noinline
int reada_for_balance(struct btrfs_root
*root
,
1746 struct btrfs_path
*path
, int level
)
1750 struct extent_buffer
*parent
;
1751 struct extent_buffer
*eb
;
1758 parent
= path
->nodes
[level
+ 1];
1762 nritems
= btrfs_header_nritems(parent
);
1763 slot
= path
->slots
[level
+ 1];
1764 blocksize
= btrfs_level_size(root
, level
);
1767 block1
= btrfs_node_blockptr(parent
, slot
- 1);
1768 gen
= btrfs_node_ptr_generation(parent
, slot
- 1);
1769 eb
= btrfs_find_tree_block(root
, block1
, blocksize
);
1771 * if we get -eagain from btrfs_buffer_uptodate, we
1772 * don't want to return eagain here. That will loop
1775 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
1777 free_extent_buffer(eb
);
1779 if (slot
+ 1 < nritems
) {
1780 block2
= btrfs_node_blockptr(parent
, slot
+ 1);
1781 gen
= btrfs_node_ptr_generation(parent
, slot
+ 1);
1782 eb
= btrfs_find_tree_block(root
, block2
, blocksize
);
1783 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
1785 free_extent_buffer(eb
);
1787 if (block1
|| block2
) {
1790 /* release the whole path */
1791 btrfs_release_path(path
);
1793 /* read the blocks */
1795 readahead_tree_block(root
, block1
, blocksize
, 0);
1797 readahead_tree_block(root
, block2
, blocksize
, 0);
1800 eb
= read_tree_block(root
, block1
, blocksize
, 0);
1801 free_extent_buffer(eb
);
1804 eb
= read_tree_block(root
, block2
, blocksize
, 0);
1805 free_extent_buffer(eb
);
1813 * when we walk down the tree, it is usually safe to unlock the higher layers
1814 * in the tree. The exceptions are when our path goes through slot 0, because
1815 * operations on the tree might require changing key pointers higher up in the
1818 * callers might also have set path->keep_locks, which tells this code to keep
1819 * the lock if the path points to the last slot in the block. This is part of
1820 * walking through the tree, and selecting the next slot in the higher block.
1822 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1823 * if lowest_unlock is 1, level 0 won't be unlocked
1825 static noinline
void unlock_up(struct btrfs_path
*path
, int level
,
1826 int lowest_unlock
, int min_write_lock_level
,
1827 int *write_lock_level
)
1830 int skip_level
= level
;
1832 struct extent_buffer
*t
;
1834 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
1835 if (!path
->nodes
[i
])
1837 if (!path
->locks
[i
])
1839 if (!no_skips
&& path
->slots
[i
] == 0) {
1843 if (!no_skips
&& path
->keep_locks
) {
1846 nritems
= btrfs_header_nritems(t
);
1847 if (nritems
< 1 || path
->slots
[i
] >= nritems
- 1) {
1852 if (skip_level
< i
&& i
>= lowest_unlock
)
1856 if (i
>= lowest_unlock
&& i
> skip_level
&& path
->locks
[i
]) {
1857 btrfs_tree_unlock_rw(t
, path
->locks
[i
]);
1859 if (write_lock_level
&&
1860 i
> min_write_lock_level
&&
1861 i
<= *write_lock_level
) {
1862 *write_lock_level
= i
- 1;
1869 * This releases any locks held in the path starting at level and
1870 * going all the way up to the root.
1872 * btrfs_search_slot will keep the lock held on higher nodes in a few
1873 * corner cases, such as COW of the block at slot zero in the node. This
1874 * ignores those rules, and it should only be called when there are no
1875 * more updates to be done higher up in the tree.
1877 noinline
void btrfs_unlock_up_safe(struct btrfs_path
*path
, int level
)
1881 if (path
->keep_locks
)
1884 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
1885 if (!path
->nodes
[i
])
1887 if (!path
->locks
[i
])
1889 btrfs_tree_unlock_rw(path
->nodes
[i
], path
->locks
[i
]);
1895 * helper function for btrfs_search_slot. The goal is to find a block
1896 * in cache without setting the path to blocking. If we find the block
1897 * we return zero and the path is unchanged.
1899 * If we can't find the block, we set the path blocking and do some
1900 * reada. -EAGAIN is returned and the search must be repeated.
1903 read_block_for_search(struct btrfs_trans_handle
*trans
,
1904 struct btrfs_root
*root
, struct btrfs_path
*p
,
1905 struct extent_buffer
**eb_ret
, int level
, int slot
,
1906 struct btrfs_key
*key
)
1911 struct extent_buffer
*b
= *eb_ret
;
1912 struct extent_buffer
*tmp
;
1915 blocknr
= btrfs_node_blockptr(b
, slot
);
1916 gen
= btrfs_node_ptr_generation(b
, slot
);
1917 blocksize
= btrfs_level_size(root
, level
- 1);
1919 tmp
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
1921 /* first we do an atomic uptodate check */
1922 if (btrfs_buffer_uptodate(tmp
, 0, 1) > 0) {
1923 if (btrfs_buffer_uptodate(tmp
, gen
, 1) > 0) {
1925 * we found an up to date block without
1932 /* the pages were up to date, but we failed
1933 * the generation number check. Do a full
1934 * read for the generation number that is correct.
1935 * We must do this without dropping locks so
1936 * we can trust our generation number
1938 free_extent_buffer(tmp
);
1939 btrfs_set_path_blocking(p
);
1941 /* now we're allowed to do a blocking uptodate check */
1942 tmp
= read_tree_block(root
, blocknr
, blocksize
, gen
);
1943 if (tmp
&& btrfs_buffer_uptodate(tmp
, gen
, 0) > 0) {
1947 free_extent_buffer(tmp
);
1948 btrfs_release_path(p
);
1954 * reduce lock contention at high levels
1955 * of the btree by dropping locks before
1956 * we read. Don't release the lock on the current
1957 * level because we need to walk this node to figure
1958 * out which blocks to read.
1960 btrfs_unlock_up_safe(p
, level
+ 1);
1961 btrfs_set_path_blocking(p
);
1963 free_extent_buffer(tmp
);
1965 reada_for_search(root
, p
, level
, slot
, key
->objectid
);
1967 btrfs_release_path(p
);
1970 tmp
= read_tree_block(root
, blocknr
, blocksize
, 0);
1973 * If the read above didn't mark this buffer up to date,
1974 * it will never end up being up to date. Set ret to EIO now
1975 * and give up so that our caller doesn't loop forever
1978 if (!btrfs_buffer_uptodate(tmp
, 0, 0))
1980 free_extent_buffer(tmp
);
1986 * helper function for btrfs_search_slot. This does all of the checks
1987 * for node-level blocks and does any balancing required based on
1990 * If no extra work was required, zero is returned. If we had to
1991 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1995 setup_nodes_for_search(struct btrfs_trans_handle
*trans
,
1996 struct btrfs_root
*root
, struct btrfs_path
*p
,
1997 struct extent_buffer
*b
, int level
, int ins_len
,
1998 int *write_lock_level
)
2001 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
2002 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3) {
2005 if (*write_lock_level
< level
+ 1) {
2006 *write_lock_level
= level
+ 1;
2007 btrfs_release_path(p
);
2011 sret
= reada_for_balance(root
, p
, level
);
2015 btrfs_set_path_blocking(p
);
2016 sret
= split_node(trans
, root
, p
, level
);
2017 btrfs_clear_path_blocking(p
, NULL
, 0);
2024 b
= p
->nodes
[level
];
2025 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
2026 BTRFS_NODEPTRS_PER_BLOCK(root
) / 2) {
2029 if (*write_lock_level
< level
+ 1) {
2030 *write_lock_level
= level
+ 1;
2031 btrfs_release_path(p
);
2035 sret
= reada_for_balance(root
, p
, level
);
2039 btrfs_set_path_blocking(p
);
2040 sret
= balance_level(trans
, root
, p
, level
);
2041 btrfs_clear_path_blocking(p
, NULL
, 0);
2047 b
= p
->nodes
[level
];
2049 btrfs_release_path(p
);
2052 BUG_ON(btrfs_header_nritems(b
) == 1);
2063 * look for key in the tree. path is filled in with nodes along the way
2064 * if key is found, we return zero and you can find the item in the leaf
2065 * level of the path (level 0)
2067 * If the key isn't found, the path points to the slot where it should
2068 * be inserted, and 1 is returned. If there are other errors during the
2069 * search a negative error number is returned.
2071 * if ins_len > 0, nodes and leaves will be split as we walk down the
2072 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2075 int btrfs_search_slot(struct btrfs_trans_handle
*trans
, struct btrfs_root
2076 *root
, struct btrfs_key
*key
, struct btrfs_path
*p
, int
2079 struct extent_buffer
*b
;
2084 int lowest_unlock
= 1;
2086 /* everything at write_lock_level or lower must be write locked */
2087 int write_lock_level
= 0;
2088 u8 lowest_level
= 0;
2089 int min_write_lock_level
;
2091 lowest_level
= p
->lowest_level
;
2092 WARN_ON(lowest_level
&& ins_len
> 0);
2093 WARN_ON(p
->nodes
[0] != NULL
);
2098 /* when we are removing items, we might have to go up to level
2099 * two as we update tree pointers Make sure we keep write
2100 * for those levels as well
2102 write_lock_level
= 2;
2103 } else if (ins_len
> 0) {
2105 * for inserting items, make sure we have a write lock on
2106 * level 1 so we can update keys
2108 write_lock_level
= 1;
2112 write_lock_level
= -1;
2114 if (cow
&& (p
->keep_locks
|| p
->lowest_level
))
2115 write_lock_level
= BTRFS_MAX_LEVEL
;
2117 min_write_lock_level
= write_lock_level
;
2121 * we try very hard to do read locks on the root
2123 root_lock
= BTRFS_READ_LOCK
;
2125 if (p
->search_commit_root
) {
2127 * the commit roots are read only
2128 * so we always do read locks
2130 b
= root
->commit_root
;
2131 extent_buffer_get(b
);
2132 level
= btrfs_header_level(b
);
2133 if (!p
->skip_locking
)
2134 btrfs_tree_read_lock(b
);
2136 if (p
->skip_locking
) {
2137 b
= btrfs_root_node(root
);
2138 level
= btrfs_header_level(b
);
2140 /* we don't know the level of the root node
2141 * until we actually have it read locked
2143 b
= btrfs_read_lock_root_node(root
);
2144 level
= btrfs_header_level(b
);
2145 if (level
<= write_lock_level
) {
2146 /* whoops, must trade for write lock */
2147 btrfs_tree_read_unlock(b
);
2148 free_extent_buffer(b
);
2149 b
= btrfs_lock_root_node(root
);
2150 root_lock
= BTRFS_WRITE_LOCK
;
2152 /* the level might have changed, check again */
2153 level
= btrfs_header_level(b
);
2157 p
->nodes
[level
] = b
;
2158 if (!p
->skip_locking
)
2159 p
->locks
[level
] = root_lock
;
2162 level
= btrfs_header_level(b
);
2165 * setup the path here so we can release it under lock
2166 * contention with the cow code
2170 * if we don't really need to cow this block
2171 * then we don't want to set the path blocking,
2172 * so we test it here
2174 if (!should_cow_block(trans
, root
, b
))
2177 btrfs_set_path_blocking(p
);
2180 * must have write locks on this node and the
2183 if (level
+ 1 > write_lock_level
) {
2184 write_lock_level
= level
+ 1;
2185 btrfs_release_path(p
);
2189 err
= btrfs_cow_block(trans
, root
, b
,
2190 p
->nodes
[level
+ 1],
2191 p
->slots
[level
+ 1], &b
);
2198 BUG_ON(!cow
&& ins_len
);
2200 p
->nodes
[level
] = b
;
2201 btrfs_clear_path_blocking(p
, NULL
, 0);
2204 * we have a lock on b and as long as we aren't changing
2205 * the tree, there is no way to for the items in b to change.
2206 * It is safe to drop the lock on our parent before we
2207 * go through the expensive btree search on b.
2209 * If cow is true, then we might be changing slot zero,
2210 * which may require changing the parent. So, we can't
2211 * drop the lock until after we know which slot we're
2215 btrfs_unlock_up_safe(p
, level
+ 1);
2217 ret
= bin_search(b
, key
, level
, &slot
);
2221 if (ret
&& slot
> 0) {
2225 p
->slots
[level
] = slot
;
2226 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
,
2227 ins_len
, &write_lock_level
);
2234 b
= p
->nodes
[level
];
2235 slot
= p
->slots
[level
];
2238 * slot 0 is special, if we change the key
2239 * we have to update the parent pointer
2240 * which means we must have a write lock
2243 if (slot
== 0 && cow
&&
2244 write_lock_level
< level
+ 1) {
2245 write_lock_level
= level
+ 1;
2246 btrfs_release_path(p
);
2250 unlock_up(p
, level
, lowest_unlock
,
2251 min_write_lock_level
, &write_lock_level
);
2253 if (level
== lowest_level
) {
2259 err
= read_block_for_search(trans
, root
, p
,
2260 &b
, level
, slot
, key
);
2268 if (!p
->skip_locking
) {
2269 level
= btrfs_header_level(b
);
2270 if (level
<= write_lock_level
) {
2271 err
= btrfs_try_tree_write_lock(b
);
2273 btrfs_set_path_blocking(p
);
2275 btrfs_clear_path_blocking(p
, b
,
2278 p
->locks
[level
] = BTRFS_WRITE_LOCK
;
2280 err
= btrfs_try_tree_read_lock(b
);
2282 btrfs_set_path_blocking(p
);
2283 btrfs_tree_read_lock(b
);
2284 btrfs_clear_path_blocking(p
, b
,
2287 p
->locks
[level
] = BTRFS_READ_LOCK
;
2289 p
->nodes
[level
] = b
;
2292 p
->slots
[level
] = slot
;
2294 btrfs_leaf_free_space(root
, b
) < ins_len
) {
2295 if (write_lock_level
< 1) {
2296 write_lock_level
= 1;
2297 btrfs_release_path(p
);
2301 btrfs_set_path_blocking(p
);
2302 err
= split_leaf(trans
, root
, key
,
2303 p
, ins_len
, ret
== 0);
2304 btrfs_clear_path_blocking(p
, NULL
, 0);
2312 if (!p
->search_for_split
)
2313 unlock_up(p
, level
, lowest_unlock
,
2314 min_write_lock_level
, &write_lock_level
);
2321 * we don't really know what they plan on doing with the path
2322 * from here on, so for now just mark it as blocking
2324 if (!p
->leave_spinning
)
2325 btrfs_set_path_blocking(p
);
2327 btrfs_release_path(p
);
2332 * adjust the pointers going up the tree, starting at level
2333 * making sure the right key of each node is points to 'key'.
2334 * This is used after shifting pointers to the left, so it stops
2335 * fixing up pointers when a given leaf/node is not in slot 0 of the
2339 static void fixup_low_keys(struct btrfs_trans_handle
*trans
,
2340 struct btrfs_root
*root
, struct btrfs_path
*path
,
2341 struct btrfs_disk_key
*key
, int level
)
2344 struct extent_buffer
*t
;
2346 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
2347 int tslot
= path
->slots
[i
];
2348 if (!path
->nodes
[i
])
2351 btrfs_set_node_key(t
, key
, tslot
);
2352 btrfs_mark_buffer_dirty(path
->nodes
[i
]);
2361 * This function isn't completely safe. It's the caller's responsibility
2362 * that the new key won't break the order
2364 void btrfs_set_item_key_safe(struct btrfs_trans_handle
*trans
,
2365 struct btrfs_root
*root
, struct btrfs_path
*path
,
2366 struct btrfs_key
*new_key
)
2368 struct btrfs_disk_key disk_key
;
2369 struct extent_buffer
*eb
;
2372 eb
= path
->nodes
[0];
2373 slot
= path
->slots
[0];
2375 btrfs_item_key(eb
, &disk_key
, slot
- 1);
2376 BUG_ON(comp_keys(&disk_key
, new_key
) >= 0);
2378 if (slot
< btrfs_header_nritems(eb
) - 1) {
2379 btrfs_item_key(eb
, &disk_key
, slot
+ 1);
2380 BUG_ON(comp_keys(&disk_key
, new_key
) <= 0);
2383 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
2384 btrfs_set_item_key(eb
, &disk_key
, slot
);
2385 btrfs_mark_buffer_dirty(eb
);
2387 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
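/*
 * Illustrative sketch, not part of this file: the push budget computed by
 * push_node_left() above, redone on plain integers so the "leave at least 8
 * pointers behind" rule is easy to see.  ptrs_per_block is a hypothetical
 * stand-in for BTRFS_NODEPTRS_PER_BLOCK(root).
 */
static int toy_push_left_budget(int ptrs_per_block, int src_nritems,
				int dst_nritems, int empty)
{
	int push_items = ptrs_per_block - dst_nritems;

	if (!empty && src_nritems <= 8)
		return 0;		/* source too small to bother */
	if (push_items <= 0)
		return 0;		/* no room in the left block */

	if (empty) {
		if (push_items > src_nritems)
			push_items = src_nritems;
		if (push_items < src_nritems && src_nritems - push_items < 8) {
			if (push_items <= 8)
				return 0;
			push_items -= 8;	/* keep 8 in the source */
		}
	} else {
		if (push_items > src_nritems - 8)
			push_items = src_nritems - 8;
	}
	return push_items;
}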
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
2518 static noinline
int insert_new_root(struct btrfs_trans_handle
*trans
,
2519 struct btrfs_root
*root
,
2520 struct btrfs_path
*path
, int level
)
2523 struct extent_buffer
*lower
;
2524 struct extent_buffer
*c
;
2525 struct extent_buffer
*old
;
2526 struct btrfs_disk_key lower_key
;
2528 BUG_ON(path
->nodes
[level
]);
2529 BUG_ON(path
->nodes
[level
-1] != root
->node
);
2531 lower
= path
->nodes
[level
-1];
2533 btrfs_item_key(lower
, &lower_key
, 0);
2535 btrfs_node_key(lower
, &lower_key
, 0);
2537 c
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
2538 root
->root_key
.objectid
, &lower_key
,
2539 level
, root
->node
->start
, 0);
2543 root_add_used(root
, root
->nodesize
);
2545 memset_extent_buffer(c
, 0, 0, sizeof(struct btrfs_header
));
2546 btrfs_set_header_nritems(c
, 1);
2547 btrfs_set_header_level(c
, level
);
2548 btrfs_set_header_bytenr(c
, c
->start
);
2549 btrfs_set_header_generation(c
, trans
->transid
);
2550 btrfs_set_header_backref_rev(c
, BTRFS_MIXED_BACKREF_REV
);
2551 btrfs_set_header_owner(c
, root
->root_key
.objectid
);
2553 write_extent_buffer(c
, root
->fs_info
->fsid
,
2554 (unsigned long)btrfs_header_fsid(c
),
2557 write_extent_buffer(c
, root
->fs_info
->chunk_tree_uuid
,
2558 (unsigned long)btrfs_header_chunk_tree_uuid(c
),
2561 btrfs_set_node_key(c
, &lower_key
, 0);
2562 btrfs_set_node_blockptr(c
, 0, lower
->start
);
2563 lower_gen
= btrfs_header_generation(lower
);
2564 WARN_ON(lower_gen
!= trans
->transid
);
2566 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
2568 btrfs_mark_buffer_dirty(c
);
2571 rcu_assign_pointer(root
->node
, c
);
2573 /* the super has an extra ref to root->node */
2574 free_extent_buffer(old
);
2576 add_root_to_dirty_list(root
);
2577 extent_buffer_get(c
);
2578 path
->nodes
[level
] = c
;
2579 path
->locks
[level
] = BTRFS_WRITE_LOCK
;
2580 path
->slots
[level
] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
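/*
 * Illustrative sketch, not part of this file: insert_ptr() above is the
 * usual "insert into a sorted array" shift done with memmove; the same move
 * on a plain array of block numbers looks like this (toy_* names are made
 * up, memmove is the ordinary memmove).
 */
static void toy_insert_ptr(unsigned long long *ptrs, int *nritems,
			   int slot, unsigned long long bytenr)
{
	/* open a hole at 'slot' by shifting the tail up by one entry */
	if (slot != *nritems)
		memmove(&ptrs[slot + 1], &ptrs[slot],
			(*nritems - slot) * sizeof(ptrs[0]));
	ptrs[slot] = bytenr;
	(*nritems)++;
}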
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
2628 static noinline
int split_node(struct btrfs_trans_handle
*trans
,
2629 struct btrfs_root
*root
,
2630 struct btrfs_path
*path
, int level
)
2632 struct extent_buffer
*c
;
2633 struct extent_buffer
*split
;
2634 struct btrfs_disk_key disk_key
;
2639 c
= path
->nodes
[level
];
2640 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
2641 if (c
== root
->node
) {
2642 /* trying to split the root, lets make a new one */
2643 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
2647 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
2648 c
= path
->nodes
[level
];
2649 if (!ret
&& btrfs_header_nritems(c
) <
2650 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3)
2656 c_nritems
= btrfs_header_nritems(c
);
2657 mid
= (c_nritems
+ 1) / 2;
2658 btrfs_node_key(c
, &disk_key
, mid
);
2660 split
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
2661 root
->root_key
.objectid
,
2662 &disk_key
, level
, c
->start
, 0);
2664 return PTR_ERR(split
);
2666 root_add_used(root
, root
->nodesize
);
2668 memset_extent_buffer(split
, 0, 0, sizeof(struct btrfs_header
));
2669 btrfs_set_header_level(split
, btrfs_header_level(c
));
2670 btrfs_set_header_bytenr(split
, split
->start
);
2671 btrfs_set_header_generation(split
, trans
->transid
);
2672 btrfs_set_header_backref_rev(split
, BTRFS_MIXED_BACKREF_REV
);
2673 btrfs_set_header_owner(split
, root
->root_key
.objectid
);
2674 write_extent_buffer(split
, root
->fs_info
->fsid
,
2675 (unsigned long)btrfs_header_fsid(split
),
2677 write_extent_buffer(split
, root
->fs_info
->chunk_tree_uuid
,
2678 (unsigned long)btrfs_header_chunk_tree_uuid(split
),
2681 copy_extent_buffer(split
, c
,
2682 btrfs_node_key_ptr_offset(0),
2683 btrfs_node_key_ptr_offset(mid
),
2684 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
2685 btrfs_set_header_nritems(split
, c_nritems
- mid
);
2686 btrfs_set_header_nritems(c
, mid
);
2689 btrfs_mark_buffer_dirty(c
);
2690 btrfs_mark_buffer_dirty(split
);
2692 insert_ptr(trans
, root
, path
, &disk_key
, split
->start
,
2693 path
->slots
[level
+ 1] + 1, level
+ 1);
2695 if (path
->slots
[level
] >= mid
) {
2696 path
->slots
[level
] -= mid
;
2697 btrfs_tree_unlock(c
);
2698 free_extent_buffer(c
);
2699 path
->nodes
[level
] = split
;
2700 path
->slots
[level
+ 1] += 1;
2702 btrfs_tree_unlock(split
);
2703 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
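/*
 * Illustrative sketch, not part of this file: a worked example of the leaf
 * space accounting above.  Item headers grow from the front of the leaf data
 * area and item payloads grow from the back; free space is what is left in
 * the middle.  The numbers (4096-byte data area, 25-byte item header, three
 * payloads of 100, 40 and 260 bytes) are made-up example values.
 */
static int toy_leaf_free_space(void)
{
	int leaf_data_size = 4096;	/* stand-in for BTRFS_LEAF_DATA_SIZE(root) */
	int item_header = 25;		/* stand-in for sizeof(struct btrfs_item) */
	int payload = 100 + 40 + 260;	/* three items' worth of data */
	int nritems = 3;

	/* 4096 - (3 * 25 + 400) = 3621 bytes left for new items and data */
	return leaf_data_size - (nritems * item_header + payload);
}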
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
2752 static noinline
int __push_leaf_right(struct btrfs_trans_handle
*trans
,
2753 struct btrfs_root
*root
,
2754 struct btrfs_path
*path
,
2755 int data_size
, int empty
,
2756 struct extent_buffer
*right
,
2757 int free_space
, u32 left_nritems
,
2760 struct extent_buffer
*left
= path
->nodes
[0];
2761 struct extent_buffer
*upper
= path
->nodes
[1];
2762 struct btrfs_map_token token
;
2763 struct btrfs_disk_key disk_key
;
2768 struct btrfs_item
*item
;
2774 btrfs_init_map_token(&token
);
2779 nr
= max_t(u32
, 1, min_slot
);
2781 if (path
->slots
[0] >= left_nritems
)
2782 push_space
+= data_size
;
2784 slot
= path
->slots
[1];
2785 i
= left_nritems
- 1;
2787 item
= btrfs_item_nr(left
, i
);
2789 if (!empty
&& push_items
> 0) {
2790 if (path
->slots
[0] > i
)
2792 if (path
->slots
[0] == i
) {
2793 int space
= btrfs_leaf_free_space(root
, left
);
2794 if (space
+ push_space
* 2 > free_space
)
2799 if (path
->slots
[0] == i
)
2800 push_space
+= data_size
;
2802 this_item_size
= btrfs_item_size(left
, item
);
2803 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
2807 push_space
+= this_item_size
+ sizeof(*item
);
2813 if (push_items
== 0)
2816 if (!empty
&& push_items
== left_nritems
)
2819 /* push left to right */
2820 right_nritems
= btrfs_header_nritems(right
);
2822 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
2823 push_space
-= leaf_data_end(root
, left
);
2825 /* make room in the right data area */
2826 data_end
= leaf_data_end(root
, right
);
2827 memmove_extent_buffer(right
,
2828 btrfs_leaf_data(right
) + data_end
- push_space
,
2829 btrfs_leaf_data(right
) + data_end
,
2830 BTRFS_LEAF_DATA_SIZE(root
) - data_end
);
2832 /* copy from the left data area */
2833 copy_extent_buffer(right
, left
, btrfs_leaf_data(right
) +
2834 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
2835 btrfs_leaf_data(left
) + leaf_data_end(root
, left
),
2838 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
2839 btrfs_item_nr_offset(0),
2840 right_nritems
* sizeof(struct btrfs_item
));
2842 /* copy the items from left to right */
2843 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
2844 btrfs_item_nr_offset(left_nritems
- push_items
),
2845 push_items
* sizeof(struct btrfs_item
));
2847 /* update the item pointers */
2848 right_nritems
+= push_items
;
2849 btrfs_set_header_nritems(right
, right_nritems
);
2850 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
2851 for (i
= 0; i
< right_nritems
; i
++) {
2852 item
= btrfs_item_nr(right
, i
);
2853 push_space
-= btrfs_token_item_size(right
, item
, &token
);
2854 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
2857 left_nritems
-= push_items
;
2858 btrfs_set_header_nritems(left
, left_nritems
);
2861 btrfs_mark_buffer_dirty(left
);
2863 clean_tree_block(trans
, root
, left
);
2865 btrfs_mark_buffer_dirty(right
);
2867 btrfs_item_key(right
, &disk_key
, 0);
2868 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
2869 btrfs_mark_buffer_dirty(upper
);
2871 /* then fixup the leaf pointer in the path */
2872 if (path
->slots
[0] >= left_nritems
) {
2873 path
->slots
[0] -= left_nritems
;
2874 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
2875 clean_tree_block(trans
, root
, path
->nodes
[0]);
2876 btrfs_tree_unlock(path
->nodes
[0]);
2877 free_extent_buffer(path
->nodes
[0]);
2878 path
->nodes
[0] = right
;
2879 path
->slots
[1] += 1;
2881 btrfs_tree_unlock(right
);
2882 free_extent_buffer(right
);
2887 btrfs_tree_unlock(right
);
2888 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, min_data_size, empty,
				right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
2966 static noinline
int __push_leaf_left(struct btrfs_trans_handle
*trans
,
2967 struct btrfs_root
*root
,
2968 struct btrfs_path
*path
, int data_size
,
2969 int empty
, struct extent_buffer
*left
,
2970 int free_space
, u32 right_nritems
,
2973 struct btrfs_disk_key disk_key
;
2974 struct extent_buffer
*right
= path
->nodes
[0];
2978 struct btrfs_item
*item
;
2979 u32 old_left_nritems
;
2983 u32 old_left_item_size
;
2984 struct btrfs_map_token token
;
2986 btrfs_init_map_token(&token
);
2989 nr
= min(right_nritems
, max_slot
);
2991 nr
= min(right_nritems
- 1, max_slot
);
2993 for (i
= 0; i
< nr
; i
++) {
2994 item
= btrfs_item_nr(right
, i
);
2996 if (!empty
&& push_items
> 0) {
2997 if (path
->slots
[0] < i
)
2999 if (path
->slots
[0] == i
) {
3000 int space
= btrfs_leaf_free_space(root
, right
);
3001 if (space
+ push_space
* 2 > free_space
)
3006 if (path
->slots
[0] == i
)
3007 push_space
+= data_size
;
3009 this_item_size
= btrfs_item_size(right
, item
);
3010 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3014 push_space
+= this_item_size
+ sizeof(*item
);
3017 if (push_items
== 0) {
3021 if (!empty
&& push_items
== btrfs_header_nritems(right
))
3024 /* push data from right to left */
3025 copy_extent_buffer(left
, right
,
3026 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
3027 btrfs_item_nr_offset(0),
3028 push_items
* sizeof(struct btrfs_item
));
3030 push_space
= BTRFS_LEAF_DATA_SIZE(root
) -
3031 btrfs_item_offset_nr(right
, push_items
- 1);
3033 copy_extent_buffer(left
, right
, btrfs_leaf_data(left
) +
3034 leaf_data_end(root
, left
) - push_space
,
3035 btrfs_leaf_data(right
) +
3036 btrfs_item_offset_nr(right
, push_items
- 1),
3038 old_left_nritems
= btrfs_header_nritems(left
);
3039 BUG_ON(old_left_nritems
<= 0);
3041 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
3042 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
3045 item
= btrfs_item_nr(left
, i
);
3047 ioff
= btrfs_token_item_offset(left
, item
, &token
);
3048 btrfs_set_token_item_offset(left
, item
,
3049 ioff
- (BTRFS_LEAF_DATA_SIZE(root
) - old_left_item_size
),
3052 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
3054 /* fixup right node */
3055 if (push_items
> right_nritems
) {
3056 printk(KERN_CRIT
"push items %d nr %u\n", push_items
,
3061 if (push_items
< right_nritems
) {
3062 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
3063 leaf_data_end(root
, right
);
3064 memmove_extent_buffer(right
, btrfs_leaf_data(right
) +
3065 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
3066 btrfs_leaf_data(right
) +
3067 leaf_data_end(root
, right
), push_space
);
3069 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
3070 btrfs_item_nr_offset(push_items
),
3071 (btrfs_header_nritems(right
) - push_items
) *
3072 sizeof(struct btrfs_item
));
3074 right_nritems
-= push_items
;
3075 btrfs_set_header_nritems(right
, right_nritems
);
3076 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
3077 for (i
= 0; i
< right_nritems
; i
++) {
3078 item
= btrfs_item_nr(right
, i
);
3080 push_space
= push_space
- btrfs_token_item_size(right
,
3082 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3085 btrfs_mark_buffer_dirty(left
);
3087 btrfs_mark_buffer_dirty(right
);
3089 clean_tree_block(trans
, root
, right
);
3091 btrfs_item_key(right
, &disk_key
, 0);
3092 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
3094 /* then fixup the leaf pointer in the path */
3095 if (path
->slots
[0] < push_items
) {
3096 path
->slots
[0] += old_left_nritems
;
3097 btrfs_tree_unlock(path
->nodes
[0]);
3098 free_extent_buffer(path
->nodes
[0]);
3099 path
->nodes
[0] = left
;
3100 path
->slots
[1] -= 1;
3102 btrfs_tree_unlock(left
);
3103 free_extent_buffer(left
);
3104 path
->slots
[0] -= push_items
;
3106 BUG_ON(path
->slots
[0] < 0);
3109 btrfs_tree_unlock(left
);
3110 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	if (left == NULL)
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		if (ret == -ENOSPC)
			ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, min_data_size,
			       empty, left, free_space, right_nritems,
			       max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
3187 static noinline
void copy_for_split(struct btrfs_trans_handle
*trans
,
3188 struct btrfs_root
*root
,
3189 struct btrfs_path
*path
,
3190 struct extent_buffer
*l
,
3191 struct extent_buffer
*right
,
3192 int slot
, int mid
, int nritems
)
3197 struct btrfs_disk_key disk_key
;
3198 struct btrfs_map_token token
;
3200 btrfs_init_map_token(&token
);
3202 nritems
= nritems
- mid
;
3203 btrfs_set_header_nritems(right
, nritems
);
3204 data_copy_size
= btrfs_item_end_nr(l
, mid
) - leaf_data_end(root
, l
);
3206 copy_extent_buffer(right
, l
, btrfs_item_nr_offset(0),
3207 btrfs_item_nr_offset(mid
),
3208 nritems
* sizeof(struct btrfs_item
));
3210 copy_extent_buffer(right
, l
,
3211 btrfs_leaf_data(right
) + BTRFS_LEAF_DATA_SIZE(root
) -
3212 data_copy_size
, btrfs_leaf_data(l
) +
3213 leaf_data_end(root
, l
), data_copy_size
);
3215 rt_data_off
= BTRFS_LEAF_DATA_SIZE(root
) -
3216 btrfs_item_end_nr(l
, mid
);
3218 for (i
= 0; i
< nritems
; i
++) {
3219 struct btrfs_item
*item
= btrfs_item_nr(right
, i
);
3222 ioff
= btrfs_token_item_offset(right
, item
, &token
);
3223 btrfs_set_token_item_offset(right
, item
,
3224 ioff
+ rt_data_off
, &token
);
3227 btrfs_set_header_nritems(l
, mid
);
3228 btrfs_item_key(right
, &disk_key
, 0);
3229 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
3230 path
->slots
[1] + 1, 1);
3232 btrfs_mark_buffer_dirty(right
);
3233 btrfs_mark_buffer_dirty(l
);
3234 BUG_ON(path
->slots
[0] != slot
);
3237 btrfs_tree_unlock(path
->nodes
[0]);
3238 free_extent_buffer(path
->nodes
[0]);
3239 path
->nodes
[0] = right
;
3240 path
->slots
[0] -= mid
;
3241 path
->slots
[1] += 1;
3243 btrfs_tree_unlock(right
);
3244 free_extent_buffer(right
);
3247 BUG_ON(path
->slots
[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
3260 static noinline
int push_for_double_split(struct btrfs_trans_handle
*trans
,
3261 struct btrfs_root
*root
,
3262 struct btrfs_path
*path
,
3270 slot
= path
->slots
[0];
3273 * try to push all the items after our slot into the
3276 ret
= push_leaf_right(trans
, root
, path
, 1, data_size
, 0, slot
);
3283 nritems
= btrfs_header_nritems(path
->nodes
[0]);
3285 * our goal is to get our slot at the start or end of a leaf. If
3286 * we've done so we're done
3288 if (path
->slots
[0] == 0 || path
->slots
[0] == nritems
)
3291 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= data_size
)
3294 /* try to push all the items before our slot into the next leaf */
3295 slot
= path
->slots
[0];
3296 ret
= push_leaf_left(trans
, root
, path
, 1, data_size
, 0, slot
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
3314 static noinline
int split_leaf(struct btrfs_trans_handle
*trans
,
3315 struct btrfs_root
*root
,
3316 struct btrfs_key
*ins_key
,
3317 struct btrfs_path
*path
, int data_size
,
3320 struct btrfs_disk_key disk_key
;
3321 struct extent_buffer
*l
;
3325 struct extent_buffer
*right
;
3329 int num_doubles
= 0;
3330 int tried_avoid_double
= 0;
3333 slot
= path
->slots
[0];
3334 if (extend
&& data_size
+ btrfs_item_size_nr(l
, slot
) +
3335 sizeof(struct btrfs_item
) > BTRFS_LEAF_DATA_SIZE(root
))
3338 /* first try to make some room by pushing left and right */
3340 wret
= push_leaf_right(trans
, root
, path
, data_size
,
3345 wret
= push_leaf_left(trans
, root
, path
, data_size
,
3346 data_size
, 0, (u32
)-1);
3352 /* did the pushes work? */
3353 if (btrfs_leaf_free_space(root
, l
) >= data_size
)
3357 if (!path
->nodes
[1]) {
3358 ret
= insert_new_root(trans
, root
, path
, 1);
3365 slot
= path
->slots
[0];
3366 nritems
= btrfs_header_nritems(l
);
3367 mid
= (nritems
+ 1) / 2;
3371 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
3372 BTRFS_LEAF_DATA_SIZE(root
)) {
3373 if (slot
>= nritems
) {
3377 if (mid
!= nritems
&&
3378 leaf_space_used(l
, mid
, nritems
- mid
) +
3379 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
3380 if (data_size
&& !tried_avoid_double
)
3381 goto push_for_double
;
3387 if (leaf_space_used(l
, 0, mid
) + data_size
>
3388 BTRFS_LEAF_DATA_SIZE(root
)) {
3389 if (!extend
&& data_size
&& slot
== 0) {
3391 } else if ((extend
|| !data_size
) && slot
== 0) {
3395 if (mid
!= nritems
&&
3396 leaf_space_used(l
, mid
, nritems
- mid
) +
3397 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
3398 if (data_size
&& !tried_avoid_double
)
3399 goto push_for_double
;
3407 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
3409 btrfs_item_key(l
, &disk_key
, mid
);
3411 right
= btrfs_alloc_free_block(trans
, root
, root
->leafsize
, 0,
3412 root
->root_key
.objectid
,
3413 &disk_key
, 0, l
->start
, 0);
3415 return PTR_ERR(right
);
3417 root_add_used(root
, root
->leafsize
);
3419 memset_extent_buffer(right
, 0, 0, sizeof(struct btrfs_header
));
3420 btrfs_set_header_bytenr(right
, right
->start
);
3421 btrfs_set_header_generation(right
, trans
->transid
);
3422 btrfs_set_header_backref_rev(right
, BTRFS_MIXED_BACKREF_REV
);
3423 btrfs_set_header_owner(right
, root
->root_key
.objectid
);
3424 btrfs_set_header_level(right
, 0);
3425 write_extent_buffer(right
, root
->fs_info
->fsid
,
3426 (unsigned long)btrfs_header_fsid(right
),
3429 write_extent_buffer(right
, root
->fs_info
->chunk_tree_uuid
,
3430 (unsigned long)btrfs_header_chunk_tree_uuid(right
),
3435 btrfs_set_header_nritems(right
, 0);
3436 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
3437 path
->slots
[1] + 1, 1);
3438 btrfs_tree_unlock(path
->nodes
[0]);
3439 free_extent_buffer(path
->nodes
[0]);
3440 path
->nodes
[0] = right
;
3442 path
->slots
[1] += 1;
3444 btrfs_set_header_nritems(right
, 0);
3445 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
3447 btrfs_tree_unlock(path
->nodes
[0]);
3448 free_extent_buffer(path
->nodes
[0]);
3449 path
->nodes
[0] = right
;
3451 if (path
->slots
[1] == 0)
3452 fixup_low_keys(trans
, root
, path
,
3455 btrfs_mark_buffer_dirty(right
);
3459 copy_for_split(trans
, root
, path
, l
, right
, slot
, mid
, nritems
);
3462 BUG_ON(num_doubles
!= 0);
3470 push_for_double_split(trans
, root
, path
, data_size
);
3471 tried_avoid_double
= 1;
3472 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= data_size
)
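/*
 * Illustrative sketch, not part of this file: the default split point used
 * by split_node() and split_leaf() above, plus the follow-up path fixup.
 * With nritems = 7, mid = 4: slots 0-3 stay in the original block, slots 4-6
 * move to the new right-hand block, and a path that pointed at slot 5 ends
 * up pointing at slot 1 of the new block.  toy_* names are made up.
 */
static void toy_split_slots(int nritems, int path_slot,
			    int *mid, int *new_slot, int *use_right_block)
{
	*mid = (nritems + 1) / 2;
	*use_right_block = (path_slot >= *mid);
	*new_slot = *use_right_block ? path_slot - *mid : path_slot;
}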
3477 static noinline
int setup_leaf_for_split(struct btrfs_trans_handle
*trans
,
3478 struct btrfs_root
*root
,
3479 struct btrfs_path
*path
, int ins_len
)
3481 struct btrfs_key key
;
3482 struct extent_buffer
*leaf
;
3483 struct btrfs_file_extent_item
*fi
;
3488 leaf
= path
->nodes
[0];
3489 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
3491 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
3492 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
3494 if (btrfs_leaf_free_space(root
, leaf
) >= ins_len
)
3497 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
3498 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
3499 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
3500 struct btrfs_file_extent_item
);
3501 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
3503 btrfs_release_path(path
);
3505 path
->keep_locks
= 1;
3506 path
->search_for_split
= 1;
3507 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
3508 path
->search_for_split
= 0;
3513 leaf
= path
->nodes
[0];
3514 /* if our item isn't there or got smaller, return now */
3515 if (ret
> 0 || item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
3518 /* the leaf has changed, it now has room. return now */
3519 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= ins_len
)
3522 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
3523 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
3524 struct btrfs_file_extent_item
);
3525 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
3529 btrfs_set_path_blocking(path
);
3530 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
3534 path
->keep_locks
= 0;
3535 btrfs_unlock_up_safe(path
, 1);
3538 path
->keep_locks
= 0;
3542 static noinline
int split_item(struct btrfs_trans_handle
*trans
,
3543 struct btrfs_root
*root
,
3544 struct btrfs_path
*path
,
3545 struct btrfs_key
*new_key
,
3546 unsigned long split_offset
)
3548 struct extent_buffer
*leaf
;
3549 struct btrfs_item
*item
;
3550 struct btrfs_item
*new_item
;
3556 struct btrfs_disk_key disk_key
;
3558 leaf
= path
->nodes
[0];
3559 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < sizeof(struct btrfs_item
));
3561 btrfs_set_path_blocking(path
);
3563 item
= btrfs_item_nr(leaf
, path
->slots
[0]);
3564 orig_offset
= btrfs_item_offset(leaf
, item
);
3565 item_size
= btrfs_item_size(leaf
, item
);
3567 buf
= kmalloc(item_size
, GFP_NOFS
);
3571 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
3572 path
->slots
[0]), item_size
);
3574 slot
= path
->slots
[0] + 1;
3575 nritems
= btrfs_header_nritems(leaf
);
3576 if (slot
!= nritems
) {
3577 /* shift the items */
3578 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
3579 btrfs_item_nr_offset(slot
),
3580 (nritems
- slot
) * sizeof(struct btrfs_item
));
3583 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
3584 btrfs_set_item_key(leaf
, &disk_key
, slot
);
3586 new_item
= btrfs_item_nr(leaf
, slot
);
3588 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
3589 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
3591 btrfs_set_item_offset(leaf
, item
,
3592 orig_offset
+ item_size
- split_offset
);
3593 btrfs_set_item_size(leaf
, item
, split_offset
);
3595 btrfs_set_header_nritems(leaf
, nritems
+ 1);
3597 /* write the data for the start of the original item */
3598 write_extent_buffer(leaf
, buf
,
3599 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
3602 /* write the data for the new item */
3603 write_extent_buffer(leaf
, buf
+ split_offset
,
3604 btrfs_item_ptr_offset(leaf
, slot
),
3605 item_size
- split_offset
);
3606 btrfs_mark_buffer_dirty(leaf
);
3608 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, root, path, new_key, split_offset);
	return ret;
}
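/*
 * Illustrative sketch, not part of this file: the offset arithmetic
 * split_item() performs.  An item occupying bytes [offset, offset + size) of
 * the leaf data area is cut split_offset bytes from its start; the front
 * part keeps the original key, the tail gets new_key.  Plain struct and
 * made-up field names.
 */
struct toy_item {
	unsigned int offset;
	unsigned int size;
};

static void toy_split_item(const struct toy_item *orig, unsigned int split_offset,
			   struct toy_item *front, struct toy_item *tail)
{
	/* front part: bytes [0, split_offset) of the original payload */
	front->offset = orig->offset + orig->size - split_offset;
	front->size = split_offset;
	/* tail part: bytes [split_offset, size) keep their on-disk position */
	tail->offset = orig->offset;
	tail->size = orig->size - split_offset;
}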
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	setup_items_for_insert(trans, root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
3686 void btrfs_truncate_item(struct btrfs_trans_handle
*trans
,
3687 struct btrfs_root
*root
,
3688 struct btrfs_path
*path
,
3689 u32 new_size
, int from_end
)
3692 struct extent_buffer
*leaf
;
3693 struct btrfs_item
*item
;
3695 unsigned int data_end
;
3696 unsigned int old_data_start
;
3697 unsigned int old_size
;
3698 unsigned int size_diff
;
3700 struct btrfs_map_token token
;
3702 btrfs_init_map_token(&token
);
3704 leaf
= path
->nodes
[0];
3705 slot
= path
->slots
[0];
3707 old_size
= btrfs_item_size_nr(leaf
, slot
);
3708 if (old_size
== new_size
)
3711 nritems
= btrfs_header_nritems(leaf
);
3712 data_end
= leaf_data_end(root
, leaf
);
3714 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
3716 size_diff
= old_size
- new_size
;
3719 BUG_ON(slot
>= nritems
);
3722 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3724 /* first correct the data pointers */
3725 for (i
= slot
; i
< nritems
; i
++) {
3727 item
= btrfs_item_nr(leaf
, i
);
3729 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
3730 btrfs_set_token_item_offset(leaf
, item
,
3731 ioff
+ size_diff
, &token
);
3734 /* shift the data */
3736 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3737 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
3738 data_end
, old_data_start
+ new_size
- data_end
);
3740 struct btrfs_disk_key disk_key
;
3743 btrfs_item_key(leaf
, &disk_key
, slot
);
3745 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
3747 struct btrfs_file_extent_item
*fi
;
3749 fi
= btrfs_item_ptr(leaf
, slot
,
3750 struct btrfs_file_extent_item
);
3751 fi
= (struct btrfs_file_extent_item
*)(
3752 (unsigned long)fi
- size_diff
);
3754 if (btrfs_file_extent_type(leaf
, fi
) ==
3755 BTRFS_FILE_EXTENT_INLINE
) {
3756 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
3757 memmove_extent_buffer(leaf
, ptr
,
3759 offsetof(struct btrfs_file_extent_item
,
3764 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3765 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
3766 data_end
, old_data_start
- data_end
);
3768 offset
= btrfs_disk_key_offset(&disk_key
);
3769 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
3770 btrfs_set_item_key(leaf
, &disk_key
, slot
);
3772 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
3775 item
= btrfs_item_nr(leaf
, slot
);
3776 btrfs_set_item_size(leaf
, item
, new_size
);
3777 btrfs_mark_buffer_dirty(leaf
);
3779 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
3780 btrfs_print_leaf(root
, leaf
);
/*
 * make the item pointed to by the path bigger; data_size is the number of
 * bytes to add to the item's current size.
 */
3788 void btrfs_extend_item(struct btrfs_trans_handle
*trans
,
3789 struct btrfs_root
*root
, struct btrfs_path
*path
,
3793 struct extent_buffer
*leaf
;
3794 struct btrfs_item
*item
;
3796 unsigned int data_end
;
3797 unsigned int old_data
;
3798 unsigned int old_size
;
3800 struct btrfs_map_token token
;
3802 btrfs_init_map_token(&token
);
3804 leaf
= path
->nodes
[0];
3806 nritems
= btrfs_header_nritems(leaf
);
3807 data_end
= leaf_data_end(root
, leaf
);
3809 if (btrfs_leaf_free_space(root
, leaf
) < data_size
) {
3810 btrfs_print_leaf(root
, leaf
);
3813 slot
= path
->slots
[0];
3814 old_data
= btrfs_item_end_nr(leaf
, slot
);
3817 if (slot
>= nritems
) {
3818 btrfs_print_leaf(root
, leaf
);
3819 printk(KERN_CRIT
"slot %d too large, nritems %d\n",
3825 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3827 /* first correct the data pointers */
3828 for (i
= slot
; i
< nritems
; i
++) {
3830 item
= btrfs_item_nr(leaf
, i
);
3832 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
3833 btrfs_set_token_item_offset(leaf
, item
,
3834 ioff
- data_size
, &token
);
3837 /* shift the data */
3838 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3839 data_end
- data_size
, btrfs_leaf_data(leaf
) +
3840 data_end
, old_data
- data_end
);
3842 data_end
= old_data
;
3843 old_size
= btrfs_item_size_nr(leaf
, slot
);
3844 item
= btrfs_item_nr(leaf
, slot
);
3845 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
3846 btrfs_mark_buffer_dirty(leaf
);
3848 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
3849 btrfs_print_leaf(root
, leaf
);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
3859 int btrfs_insert_some_items(struct btrfs_trans_handle
*trans
,
3860 struct btrfs_root
*root
,
3861 struct btrfs_path
*path
,
3862 struct btrfs_key
*cpu_key
, u32
*data_size
,
3865 struct extent_buffer
*leaf
;
3866 struct btrfs_item
*item
;
3873 unsigned int data_end
;
3874 struct btrfs_disk_key disk_key
;
3875 struct btrfs_key found_key
;
3876 struct btrfs_map_token token
;
3878 btrfs_init_map_token(&token
);
3880 for (i
= 0; i
< nr
; i
++) {
3881 if (total_size
+ data_size
[i
] + sizeof(struct btrfs_item
) >
3882 BTRFS_LEAF_DATA_SIZE(root
)) {
3886 total_data
+= data_size
[i
];
3887 total_size
+= data_size
[i
] + sizeof(struct btrfs_item
);
3891 ret
= btrfs_search_slot(trans
, root
, cpu_key
, path
, total_size
, 1);
3897 leaf
= path
->nodes
[0];
3899 nritems
= btrfs_header_nritems(leaf
);
3900 data_end
= leaf_data_end(root
, leaf
);
3902 if (btrfs_leaf_free_space(root
, leaf
) < total_size
) {
3903 for (i
= nr
; i
>= 0; i
--) {
3904 total_data
-= data_size
[i
];
3905 total_size
-= data_size
[i
] + sizeof(struct btrfs_item
);
3906 if (total_size
< btrfs_leaf_free_space(root
, leaf
))
3912 slot
= path
->slots
[0];
3915 if (slot
!= nritems
) {
3916 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
3918 item
= btrfs_item_nr(leaf
, slot
);
3919 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
3921 /* figure out how many keys we can insert in here */
3922 total_data
= data_size
[0];
3923 for (i
= 1; i
< nr
; i
++) {
3924 if (btrfs_comp_cpu_keys(&found_key
, cpu_key
+ i
) <= 0)
3926 total_data
+= data_size
[i
];
3930 if (old_data
< data_end
) {
3931 btrfs_print_leaf(root
, leaf
);
3932 printk(KERN_CRIT
"slot %d old_data %d data_end %d\n",
3933 slot
, old_data
, data_end
);
3937 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3939 /* first correct the data pointers */
3940 for (i
= slot
; i
< nritems
; i
++) {
3943 item
= btrfs_item_nr(leaf
, i
);
3944 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
3945 btrfs_set_token_item_offset(leaf
, item
,
3946 ioff
- total_data
, &token
);
3948 /* shift the items */
3949 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
3950 btrfs_item_nr_offset(slot
),
3951 (nritems
- slot
) * sizeof(struct btrfs_item
));
3953 /* shift the data */
3954 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3955 data_end
- total_data
, btrfs_leaf_data(leaf
) +
3956 data_end
, old_data
- data_end
);
3957 data_end
= old_data
;
3960 * this sucks but it has to be done, if we are inserting at
3961 * the end of the leaf only insert 1 of the items, since we
3962 * have no way of knowing whats on the next leaf and we'd have
3963 * to drop our current locks to figure it out
3968 /* setup the item for the new data */
3969 for (i
= 0; i
< nr
; i
++) {
3970 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
3971 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
3972 item
= btrfs_item_nr(leaf
, slot
+ i
);
3973 btrfs_set_token_item_offset(leaf
, item
,
3974 data_end
- data_size
[i
], &token
);
3975 data_end
-= data_size
[i
];
3976 btrfs_set_token_item_size(leaf
, item
, data_size
[i
], &token
);
3978 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
3979 btrfs_mark_buffer_dirty(leaf
);
3983 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
3984 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
3987 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
3988 btrfs_print_leaf(root
, leaf
);
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
4002 void setup_items_for_insert(struct btrfs_trans_handle
*trans
,
4003 struct btrfs_root
*root
, struct btrfs_path
*path
,
4004 struct btrfs_key
*cpu_key
, u32
*data_size
,
4005 u32 total_data
, u32 total_size
, int nr
)
4007 struct btrfs_item
*item
;
4010 unsigned int data_end
;
4011 struct btrfs_disk_key disk_key
;
4012 struct extent_buffer
*leaf
;
4014 struct btrfs_map_token token
;
4016 btrfs_init_map_token(&token
);
4018 leaf
= path
->nodes
[0];
4019 slot
= path
->slots
[0];
4021 nritems
= btrfs_header_nritems(leaf
);
4022 data_end
= leaf_data_end(root
, leaf
);
4024 if (btrfs_leaf_free_space(root
, leaf
) < total_size
) {
4025 btrfs_print_leaf(root
, leaf
);
4026 printk(KERN_CRIT
"not enough freespace need %u have %d\n",
4027 total_size
, btrfs_leaf_free_space(root
, leaf
));
4031 if (slot
!= nritems
) {
4032 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
4034 if (old_data
< data_end
) {
4035 btrfs_print_leaf(root
, leaf
);
4036 printk(KERN_CRIT
"slot %d old_data %d data_end %d\n",
4037 slot
, old_data
, data_end
);
4041 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4043 /* first correct the data pointers */
4044 for (i
= slot
; i
< nritems
; i
++) {
4047 item
= btrfs_item_nr(leaf
, i
);
4048 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4049 btrfs_set_token_item_offset(leaf
, item
,
4050 ioff
- total_data
, &token
);
4052 /* shift the items */
4053 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
4054 btrfs_item_nr_offset(slot
),
4055 (nritems
- slot
) * sizeof(struct btrfs_item
));
4057 /* shift the data */
4058 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4059 data_end
- total_data
, btrfs_leaf_data(leaf
) +
4060 data_end
, old_data
- data_end
);
4061 data_end
= old_data
;
4064 /* setup the item for the new data */
4065 for (i
= 0; i
< nr
; i
++) {
4066 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
4067 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
4068 item
= btrfs_item_nr(leaf
, slot
+ i
);
4069 btrfs_set_token_item_offset(leaf
, item
,
4070 data_end
- data_size
[i
], &token
);
4071 data_end
-= data_size
[i
];
4072 btrfs_set_token_item_size(leaf
, item
, data_size
[i
], &token
);
4075 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
4078 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
4079 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
4081 btrfs_unlock_up_safe(path
, 1);
4082 btrfs_mark_buffer_dirty(leaf
);
4084 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
4085 btrfs_print_leaf(root
, leaf
);
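/*
 * Illustrative sketch, not part of this file: what setup_items_for_insert()
 * does when the insertion point is in the middle of a leaf, modeled on plain
 * arrays of item offsets and sizes.  Existing headers at or after 'slot' are
 * shifted up by nr entries and their data offsets drop by total_data, which
 * opens the hole the new items will fill.  Names are made up; the backward
 * loop allows the shift to happen in place.
 */
static void toy_make_room(unsigned int *item_offset, unsigned int *item_size,
			  int nritems, int slot, unsigned int total_data, int nr)
{
	int i;

	for (i = nritems - 1; i >= slot; i--) {
		item_offset[i + nr] = item_offset[i] - total_data;
		item_size[i + nr] = item_size[i];
	}
}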
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(trans, root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(trans, root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
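/*
 * Illustrative sketch, not part of this file: the array shift del_ptr() does
 * above, on a plain array of block numbers (the counterpart of the insert
 * sketch earlier; memmove is the ordinary memmove).
 */
static void toy_del_ptr(unsigned long long *ptrs, int *nritems, int slot)
{
	if (slot != *nritems - 1)
		memmove(&ptrs[slot], &ptrs[slot + 1],
			(*nritems - slot - 1) * sizeof(ptrs[0]));
	(*nritems)--;
}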
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(trans, root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
4220 int btrfs_del_items(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
4221 struct btrfs_path
*path
, int slot
, int nr
)
4223 struct extent_buffer
*leaf
;
4224 struct btrfs_item
*item
;
4231 struct btrfs_map_token token
;
4233 btrfs_init_map_token(&token
);
4235 leaf
= path
->nodes
[0];
4236 last_off
= btrfs_item_offset_nr(leaf
, slot
+ nr
- 1);
4238 for (i
= 0; i
< nr
; i
++)
4239 dsize
+= btrfs_item_size_nr(leaf
, slot
+ i
);
4241 nritems
= btrfs_header_nritems(leaf
);
4243 if (slot
+ nr
!= nritems
) {
4244 int data_end
= leaf_data_end(root
, leaf
);
4246 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4248 btrfs_leaf_data(leaf
) + data_end
,
4249 last_off
- data_end
);
4251 for (i
= slot
+ nr
; i
< nritems
; i
++) {
4254 item
= btrfs_item_nr(leaf
, i
);
4255 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4256 btrfs_set_token_item_offset(leaf
, item
,
4257 ioff
+ dsize
, &token
);
4260 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
),
4261 btrfs_item_nr_offset(slot
+ nr
),
4262 sizeof(struct btrfs_item
) *
4263 (nritems
- slot
- nr
));
4265 btrfs_set_header_nritems(leaf
, nritems
- nr
);
4268 /* delete the leaf if we've emptied it */
4270 if (leaf
== root
->node
) {
4271 btrfs_set_header_level(leaf
, 0);
4273 btrfs_set_path_blocking(path
);
4274 clean_tree_block(trans
, root
, leaf
);
4275 btrfs_del_leaf(trans
, root
, path
, leaf
);
4278 int used
= leaf_space_used(leaf
, 0, nritems
);
4280 struct btrfs_disk_key disk_key
;
4282 btrfs_item_key(leaf
, &disk_key
, 0);
4283 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
4286 /* delete the leaf if it is mostly empty */
4287 if (used
< BTRFS_LEAF_DATA_SIZE(root
) / 3) {
4288 /* push_leaf_left fixes the path.
4289 * make sure the path still points to our leaf
4290 * for possible call to del_ptr below
4292 slot
= path
->slots
[1];
4293 extent_buffer_get(leaf
);
4295 btrfs_set_path_blocking(path
);
4296 wret
= push_leaf_left(trans
, root
, path
, 1, 1,
4298 if (wret
< 0 && wret
!= -ENOSPC
)
4301 if (path
->nodes
[0] == leaf
&&
4302 btrfs_header_nritems(leaf
)) {
4303 wret
= push_leaf_right(trans
, root
, path
, 1,
4305 if (wret
< 0 && wret
!= -ENOSPC
)
4309 if (btrfs_header_nritems(leaf
) == 0) {
4310 path
->slots
[1] = slot
;
4311 btrfs_del_leaf(trans
, root
, path
, leaf
);
4312 free_extent_buffer(leaf
);
4315 /* if we're still in the path, make sure
4316 * we're dirty. Otherwise, one of the
4317 * push_leaf functions must have already
4318 * dirtied this buffer
4320 if (path
->nodes
[0] == leaf
)
4321 btrfs_mark_buffer_dirty(leaf
);
4322 free_extent_buffer(leaf
);
4325 btrfs_mark_buffer_dirty(leaf
);
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
4389 int btrfs_search_forward(struct btrfs_root
*root
, struct btrfs_key
*min_key
,
4390 struct btrfs_key
*max_key
,
4391 struct btrfs_path
*path
, int cache_only
,
4394 struct extent_buffer
*cur
;
4395 struct btrfs_key found_key
;
4402 WARN_ON(!path
->keep_locks
);
4404 cur
= btrfs_read_lock_root_node(root
);
4405 level
= btrfs_header_level(cur
);
4406 WARN_ON(path
->nodes
[level
]);
4407 path
->nodes
[level
] = cur
;
4408 path
->locks
[level
] = BTRFS_READ_LOCK
;
4410 if (btrfs_header_generation(cur
) < min_trans
) {
4415 nritems
= btrfs_header_nritems(cur
);
4416 level
= btrfs_header_level(cur
);
4417 sret
= bin_search(cur
, min_key
, level
, &slot
);
4419 /* at the lowest level, we're done, setup the path and exit */
4420 if (level
== path
->lowest_level
) {
4421 if (slot
>= nritems
)
4424 path
->slots
[level
] = slot
;
4425 btrfs_item_key_to_cpu(cur
, &found_key
, slot
);
4428 if (sret
&& slot
> 0)
4431 * check this node pointer against the cache_only and
4432 * min_trans parameters. If it isn't in cache or is too
4433 * old, skip to the next one.
4435 while (slot
< nritems
) {
4438 struct extent_buffer
*tmp
;
4439 struct btrfs_disk_key disk_key
;
4441 blockptr
= btrfs_node_blockptr(cur
, slot
);
4442 gen
= btrfs_node_ptr_generation(cur
, slot
);
4443 if (gen
< min_trans
) {
4451 btrfs_node_key(cur
, &disk_key
, slot
);
4452 if (comp_keys(&disk_key
, max_key
) >= 0) {
4458 tmp
= btrfs_find_tree_block(root
, blockptr
,
4459 btrfs_level_size(root
, level
- 1));
4461 if (tmp
&& btrfs_buffer_uptodate(tmp
, gen
, 1) > 0) {
4462 free_extent_buffer(tmp
);
4466 free_extent_buffer(tmp
);
4471 * we didn't find a candidate key in this node, walk forward
4472 * and find another one
4474 if (slot
>= nritems
) {
4475 path
->slots
[level
] = slot
;
4476 btrfs_set_path_blocking(path
);
4477 sret
= btrfs_find_next_key(root
, path
, min_key
, level
,
4478 cache_only
, min_trans
);
4480 btrfs_release_path(path
);
4486 /* save our key for returning back */
4487 btrfs_node_key_to_cpu(cur
, &found_key
, slot
);
4488 path
->slots
[level
] = slot
;
4489 if (level
== path
->lowest_level
) {
4491 unlock_up(path
, level
, 1, 0, NULL
);
4494 btrfs_set_path_blocking(path
);
4495 cur
= read_node_slot(root
, cur
, slot
);
4496 BUG_ON(!cur
); /* -ENOMEM */
4498 btrfs_tree_read_lock(cur
);
4500 path
->locks
[level
- 1] = BTRFS_READ_LOCK
;
4501 path
->nodes
[level
- 1] = cur
;
4502 unlock_up(path
, level
, 1, 0, NULL
);
4503 btrfs_clear_path_blocking(path
, NULL
, 0);
4507 memcpy(min_key
, &found_key
, sizeof(found_key
));
4508 btrfs_set_path_blocking(path
);
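/*
 * Illustrative sketch, not part of this file: a minimal caller of
 * btrfs_search_forward() above, loosely in the style of the defrag and
 * tree-log users.  The starting keys, the empty "examine" step and the
 * simple offset-based advance are placeholders; error handling is reduced to
 * the bare minimum.
 */
static int toy_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	path->keep_locks = 1;
	while (1) {
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, 0, min_trans);
		if (ret != 0)
			break;		/* error, or nothing newer left */

		/* ... examine the item found at min_key here ... */

		btrfs_release_path(path);
		/* advance past the key we just saw */
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}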
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
4524 int btrfs_find_next_key(struct btrfs_root
*root
, struct btrfs_path
*path
,
4525 struct btrfs_key
*key
, int level
,
4526 int cache_only
, u64 min_trans
)
4529 struct extent_buffer
*c
;
4531 WARN_ON(!path
->keep_locks
);
4532 while (level
< BTRFS_MAX_LEVEL
) {
4533 if (!path
->nodes
[level
])
4536 slot
= path
->slots
[level
] + 1;
4537 c
= path
->nodes
[level
];
4539 if (slot
>= btrfs_header_nritems(c
)) {
4542 struct btrfs_key cur_key
;
4543 if (level
+ 1 >= BTRFS_MAX_LEVEL
||
4544 !path
->nodes
[level
+ 1])
4547 if (path
->locks
[level
+ 1]) {
4552 slot
= btrfs_header_nritems(c
) - 1;
4554 btrfs_item_key_to_cpu(c
, &cur_key
, slot
);
4556 btrfs_node_key_to_cpu(c
, &cur_key
, slot
);
4558 orig_lowest
= path
->lowest_level
;
4559 btrfs_release_path(path
);
4560 path
->lowest_level
= level
;
4561 ret
= btrfs_search_slot(NULL
, root
, &cur_key
, path
,
4563 path
->lowest_level
= orig_lowest
;
4567 c
= path
->nodes
[level
];
4568 slot
= path
->slots
[level
];
4575 btrfs_item_key_to_cpu(c
, key
, slot
);
4577 u64 blockptr
= btrfs_node_blockptr(c
, slot
);
4578 u64 gen
= btrfs_node_ptr_generation(c
, slot
);
4581 struct extent_buffer
*cur
;
4582 cur
= btrfs_find_tree_block(root
, blockptr
,
4583 btrfs_level_size(root
, level
- 1));
4585 btrfs_buffer_uptodate(cur
, gen
, 1) <= 0) {
4588 free_extent_buffer(cur
);
4591 free_extent_buffer(cur
);
4593 if (gen
< min_trans
) {
4597 btrfs_node_key_to_cpu(c
, key
, slot
);
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
4609 int btrfs_next_leaf(struct btrfs_root
*root
, struct btrfs_path
*path
)
4613 struct extent_buffer
*c
;
4614 struct extent_buffer
*next
;
4615 struct btrfs_key key
;
4618 int old_spinning
= path
->leave_spinning
;
4619 int next_rw_lock
= 0;
4621 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4625 btrfs_item_key_to_cpu(path
->nodes
[0], &key
, nritems
- 1);
4630 btrfs_release_path(path
);
4632 path
->keep_locks
= 1;
4633 path
->leave_spinning
= 1;
4635 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
4636 path
->keep_locks
= 0;
4641 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4643 * by releasing the path above we dropped all our locks. A balance
4644 * could have added more items next to the key that used to be
4645 * at the very end of the block. So, check again here and
4646 * advance the path if there are now more items available.
4648 if (nritems
> 0 && path
->slots
[0] < nritems
- 1) {
4655 while (level
< BTRFS_MAX_LEVEL
) {
4656 if (!path
->nodes
[level
]) {
4661 slot
= path
->slots
[level
] + 1;
4662 c
= path
->nodes
[level
];
4663 if (slot
>= btrfs_header_nritems(c
)) {
4665 if (level
== BTRFS_MAX_LEVEL
) {
4673 btrfs_tree_unlock_rw(next
, next_rw_lock
);
4674 free_extent_buffer(next
);
4678 next_rw_lock
= path
->locks
[level
];
4679 ret
= read_block_for_search(NULL
, root
, path
, &next
, level
,
4685 btrfs_release_path(path
);
4689 if (!path
->skip_locking
) {
4690 ret
= btrfs_try_tree_read_lock(next
);
4692 btrfs_set_path_blocking(path
);
4693 btrfs_tree_read_lock(next
);
4694 btrfs_clear_path_blocking(path
, next
,
4697 next_rw_lock
= BTRFS_READ_LOCK
;
4701 path
->slots
[level
] = slot
;
4704 c
= path
->nodes
[level
];
4705 if (path
->locks
[level
])
4706 btrfs_tree_unlock_rw(c
, path
->locks
[level
]);
4708 free_extent_buffer(c
);
4709 path
->nodes
[level
] = next
;
4710 path
->slots
[level
] = 0;
4711 if (!path
->skip_locking
)
4712 path
->locks
[level
] = next_rw_lock
;
4716 ret
= read_block_for_search(NULL
, root
, path
, &next
, level
,
4722 btrfs_release_path(path
);
4726 if (!path
->skip_locking
) {
4727 ret
= btrfs_try_tree_read_lock(next
);
4729 btrfs_set_path_blocking(path
);
4730 btrfs_tree_read_lock(next
);
4731 btrfs_clear_path_blocking(path
, next
,
4734 next_rw_lock
= BTRFS_READ_LOCK
;
4739 unlock_up(path
, 0, 1, 0, NULL
);
4740 path
->leave_spinning
= old_spinning
;
4742 btrfs_set_path_blocking(path
);
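/*
 * Illustrative sketch, not part of this file: the usual way btrfs code walks
 * every item of a tree with btrfs_search_slot() + btrfs_next_leaf() above.
 * The all-zero starting key and the empty "examine" step are placeholders.
 */
static int toy_walk_all_items(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;		/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... examine the item at path->slots[0] here ... */
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}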
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
4753 int btrfs_previous_item(struct btrfs_root
*root
,
4754 struct btrfs_path
*path
, u64 min_objectid
,
4757 struct btrfs_key found_key
;
4758 struct extent_buffer
*leaf
;
4763 if (path
->slots
[0] == 0) {
4764 btrfs_set_path_blocking(path
);
4765 ret
= btrfs_prev_leaf(root
, path
);
4771 leaf
= path
->nodes
[0];
4772 nritems
= btrfs_header_nritems(leaf
);
4775 if (path
->slots
[0] == nritems
)
4778 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4779 if (found_key
.objectid
< min_objectid
)
4781 if (found_key
.type
== type
)
4783 if (found_key
.objectid
== min_objectid
&&
4784 found_key
.type
< type
)