/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	if (path)
		path->reada = 1;
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (p->nodes[i] && p->locks[i])
			btrfs_set_lock_blocking(p->nodes[i]);
	}
}
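/*
 * Usage sketch (illustrative only): a caller that is about to do something
 * that can sleep typically pairs the two helpers like this:
 *
 *	btrfs_set_path_blocking(path);
 *	ret = operation_that_may_sleep();	(hypothetical helper)
 *	btrfs_clear_path_blocking(path, NULL);
 */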
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	btrfs_release_path(NULL, p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;
	spin_lock(&root->node_lock);
	eb = root->node;
	extent_buffer_get(eb);
	spin_unlock(&root->node_lock);
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);

		spin_lock(&root->node_lock);
		if (eb == root->node) {
			spin_unlock(&root->node_lock);
			break;
		}
		spin_unlock(&root->node_lock);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
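/*
 * Usage sketch (illustrative): the caller ends up owning both the tree lock
 * and a reference on whatever buffer is the root at the moment the lock is
 * taken, e.g.
 *
 *	struct extent_buffer *eb = btrfs_lock_root_node(root);
 *	... modify the root node ...
 *	btrfs_tree_unlock(eb);
 *	free_extent_buffer(eb);
 */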
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	u32 nritems;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
274 static noinline
int update_ref_for_cow(struct btrfs_trans_handle
*trans
,
275 struct btrfs_root
*root
,
276 struct extent_buffer
*buf
,
277 struct extent_buffer
*cow
)
	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */
302 if (btrfs_block_can_be_shared(root
, buf
)) {
303 ret
= btrfs_lookup_extent_info(trans
, root
, buf
->start
,
304 buf
->len
, &refs
, &flags
);
309 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
310 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
311 flags
= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
316 owner
= btrfs_header_owner(buf
);
317 BUG_ON(owner
== BTRFS_TREE_RELOC_OBJECTID
&&
318 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
));
321 if ((owner
== root
->root_key
.objectid
||
322 root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) &&
323 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
)) {
324 ret
= btrfs_inc_ref(trans
, root
, buf
, 1);
327 if (root
->root_key
.objectid
==
328 BTRFS_TREE_RELOC_OBJECTID
) {
329 ret
= btrfs_dec_ref(trans
, root
, buf
, 0);
331 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
334 new_flags
|= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
337 if (root
->root_key
.objectid
==
338 BTRFS_TREE_RELOC_OBJECTID
)
339 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
341 ret
= btrfs_inc_ref(trans
, root
, cow
, 0);
344 if (new_flags
!= 0) {
345 ret
= btrfs_set_disk_extent_flags(trans
, root
,
352 if (flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
) {
353 if (root
->root_key
.objectid
==
354 BTRFS_TREE_RELOC_OBJECTID
)
355 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
357 ret
= btrfs_inc_ref(trans
, root
, cow
, 0);
359 ret
= btrfs_dec_ref(trans
, root
, buf
, 1);
362 clean_tree_block(trans
, root
, buf
);
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
379 static noinline
int __btrfs_cow_block(struct btrfs_trans_handle
*trans
,
380 struct btrfs_root
*root
,
381 struct extent_buffer
*buf
,
382 struct extent_buffer
*parent
, int parent_slot
,
383 struct extent_buffer
**cow_ret
,
384 u64 search_start
, u64 empty_size
)
386 struct btrfs_disk_key disk_key
;
387 struct extent_buffer
*cow
;
395 btrfs_assert_tree_locked(buf
);
397 WARN_ON(root
->ref_cows
&& trans
->transid
!=
398 root
->fs_info
->running_transaction
->transid
);
399 WARN_ON(root
->ref_cows
&& trans
->transid
!= root
->last_trans
);
401 level
= btrfs_header_level(buf
);
404 btrfs_item_key(buf
, &disk_key
, 0);
406 btrfs_node_key(buf
, &disk_key
, 0);
408 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) {
410 parent_start
= parent
->start
;
416 cow
= btrfs_alloc_free_block(trans
, root
, buf
->len
, parent_start
,
417 root
->root_key
.objectid
, &disk_key
,
418 level
, search_start
, empty_size
);
422 /* cow is set to blocking by btrfs_init_new_buffer */
424 copy_extent_buffer(cow
, buf
, 0, 0, cow
->len
);
425 btrfs_set_header_bytenr(cow
, cow
->start
);
426 btrfs_set_header_generation(cow
, trans
->transid
);
427 btrfs_set_header_backref_rev(cow
, BTRFS_MIXED_BACKREF_REV
);
428 btrfs_clear_header_flag(cow
, BTRFS_HEADER_FLAG_WRITTEN
|
429 BTRFS_HEADER_FLAG_RELOC
);
430 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
431 btrfs_set_header_flag(cow
, BTRFS_HEADER_FLAG_RELOC
);
433 btrfs_set_header_owner(cow
, root
->root_key
.objectid
);
435 write_extent_buffer(cow
, root
->fs_info
->fsid
,
436 (unsigned long)btrfs_header_fsid(cow
),
439 update_ref_for_cow(trans
, root
, buf
, cow
);
441 if (buf
== root
->node
) {
442 WARN_ON(parent
&& parent
!= buf
);
443 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
444 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
445 parent_start
= buf
->start
;
449 spin_lock(&root
->node_lock
);
451 extent_buffer_get(cow
);
452 spin_unlock(&root
->node_lock
);
454 btrfs_free_extent(trans
, root
, buf
->start
, buf
->len
,
455 parent_start
, root
->root_key
.objectid
,
457 free_extent_buffer(buf
);
458 add_root_to_dirty_list(root
);
460 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
461 parent_start
= parent
->start
;
465 WARN_ON(trans
->transid
!= btrfs_header_generation(parent
));
466 btrfs_set_node_blockptr(parent
, parent_slot
,
468 btrfs_set_node_ptr_generation(parent
, parent_slot
,
470 btrfs_mark_buffer_dirty(parent
);
471 btrfs_free_extent(trans
, root
, buf
->start
, buf
->len
,
472 parent_start
, root
->root_key
.objectid
,
476 btrfs_tree_unlock(buf
);
477 free_extent_buffer(buf
);
478 btrfs_mark_buffer_dirty(cow
);
483 static inline int should_cow_block(struct btrfs_trans_handle
*trans
,
484 struct btrfs_root
*root
,
485 struct extent_buffer
*buf
)
487 if (btrfs_header_generation(buf
) == trans
->transid
&&
488 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
) &&
489 !(root
->root_key
.objectid
!= BTRFS_TREE_RELOC_OBJECTID
&&
490 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)))
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
500 noinline
int btrfs_cow_block(struct btrfs_trans_handle
*trans
,
501 struct btrfs_root
*root
, struct extent_buffer
*buf
,
502 struct extent_buffer
*parent
, int parent_slot
,
503 struct extent_buffer
**cow_ret
)
508 if (trans
->transaction
!= root
->fs_info
->running_transaction
) {
509 printk(KERN_CRIT
"trans %llu running %llu\n",
510 (unsigned long long)trans
->transid
,
512 root
->fs_info
->running_transaction
->transid
);
515 if (trans
->transid
!= root
->fs_info
->generation
) {
516 printk(KERN_CRIT
"trans %llu running %llu\n",
517 (unsigned long long)trans
->transid
,
518 (unsigned long long)root
->fs_info
->generation
);
522 if (!should_cow_block(trans
, root
, buf
)) {
527 search_start
= buf
->start
& ~((u64
)(1024 * 1024 * 1024) - 1);
530 btrfs_set_lock_blocking(parent
);
531 btrfs_set_lock_blocking(buf
);
533 ret
= __btrfs_cow_block(trans
, root
, buf
, parent
,
534 parent_slot
, cow_ret
, search_start
, 0);
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
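/*
 * Worked example (illustrative): with a 4K blocksize, blocknr = 100K and
 * other = 120K are "close", because 120K - (100K + 4K) = 16K < 32K.  With
 * other = 200K they are not, since the gap is 96K.
 */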
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
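/*
 * Worked example (illustrative): keys order by objectid, then type, then
 * offset, so {5, 1, 0} sorts before {5, 2, 0} and btrfs_comp_cpu_keys()
 * returns -1 for that pair; it returns 0 only when all three fields match.
 */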
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
588 int btrfs_realloc_node(struct btrfs_trans_handle
*trans
,
589 struct btrfs_root
*root
, struct extent_buffer
*parent
,
590 int start_slot
, int cache_only
, u64
*last_ret
,
591 struct btrfs_key
*progress
)
593 struct extent_buffer
*cur
;
596 u64 search_start
= *last_ret
;
606 int progress_passed
= 0;
607 struct btrfs_disk_key disk_key
;
609 parent_level
= btrfs_header_level(parent
);
610 if (cache_only
&& parent_level
!= 1)
613 if (trans
->transaction
!= root
->fs_info
->running_transaction
)
615 if (trans
->transid
!= root
->fs_info
->generation
)
618 parent_nritems
= btrfs_header_nritems(parent
);
619 blocksize
= btrfs_level_size(root
, parent_level
- 1);
620 end_slot
= parent_nritems
;
622 if (parent_nritems
== 1)
625 btrfs_set_lock_blocking(parent
);
627 for (i
= start_slot
; i
< end_slot
; i
++) {
630 if (!parent
->map_token
) {
631 map_extent_buffer(parent
,
632 btrfs_node_key_ptr_offset(i
),
633 sizeof(struct btrfs_key_ptr
),
634 &parent
->map_token
, &parent
->kaddr
,
635 &parent
->map_start
, &parent
->map_len
,
638 btrfs_node_key(parent
, &disk_key
, i
);
639 if (!progress_passed
&& comp_keys(&disk_key
, progress
) < 0)
643 blocknr
= btrfs_node_blockptr(parent
, i
);
644 gen
= btrfs_node_ptr_generation(parent
, i
);
646 last_block
= blocknr
;
649 other
= btrfs_node_blockptr(parent
, i
- 1);
650 close
= close_blocks(blocknr
, other
, blocksize
);
652 if (!close
&& i
< end_slot
- 2) {
653 other
= btrfs_node_blockptr(parent
, i
+ 1);
654 close
= close_blocks(blocknr
, other
, blocksize
);
657 last_block
= blocknr
;
660 if (parent
->map_token
) {
661 unmap_extent_buffer(parent
, parent
->map_token
,
663 parent
->map_token
= NULL
;
666 cur
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
668 uptodate
= btrfs_buffer_uptodate(cur
, gen
);
671 if (!cur
|| !uptodate
) {
673 free_extent_buffer(cur
);
677 cur
= read_tree_block(root
, blocknr
,
679 } else if (!uptodate
) {
680 btrfs_read_buffer(cur
, gen
);
683 if (search_start
== 0)
684 search_start
= last_block
;
686 btrfs_tree_lock(cur
);
687 btrfs_set_lock_blocking(cur
);
688 err
= __btrfs_cow_block(trans
, root
, cur
, parent
, i
,
691 (end_slot
- i
) * blocksize
));
693 btrfs_tree_unlock(cur
);
694 free_extent_buffer(cur
);
697 search_start
= cur
->start
;
698 last_block
= cur
->start
;
699 *last_ret
= search_start
;
700 btrfs_tree_unlock(cur
);
701 free_extent_buffer(cur
);
703 if (parent
->map_token
) {
704 unmap_extent_buffer(parent
, parent
->map_token
,
706 parent
->map_token
= NULL
;
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
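/*
 * Layout reminder (illustrative): item headers grow forward from the start
 * of the leaf while item data grows backward from the end, so with a data
 * area of N bytes and a single item whose data takes 100 bytes,
 * leaf_data_end() returns N - 100; for an empty leaf it returns N.
 */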
/*
 * extra debugging checks to make sure all the items in a node are
 * well formed and in the proper order
 */
729 static int check_node(struct btrfs_root
*root
, struct btrfs_path
*path
,
732 struct extent_buffer
*parent
= NULL
;
733 struct extent_buffer
*node
= path
->nodes
[level
];
734 struct btrfs_disk_key parent_key
;
735 struct btrfs_disk_key node_key
;
738 struct btrfs_key cpukey
;
739 u32 nritems
= btrfs_header_nritems(node
);
741 if (path
->nodes
[level
+ 1])
742 parent
= path
->nodes
[level
+ 1];
744 slot
= path
->slots
[level
];
745 BUG_ON(nritems
== 0);
747 parent_slot
= path
->slots
[level
+ 1];
748 btrfs_node_key(parent
, &parent_key
, parent_slot
);
749 btrfs_node_key(node
, &node_key
, 0);
750 BUG_ON(memcmp(&parent_key
, &node_key
,
751 sizeof(struct btrfs_disk_key
)));
752 BUG_ON(btrfs_node_blockptr(parent
, parent_slot
) !=
753 btrfs_header_bytenr(node
));
755 BUG_ON(nritems
> BTRFS_NODEPTRS_PER_BLOCK(root
));
757 btrfs_node_key_to_cpu(node
, &cpukey
, slot
- 1);
758 btrfs_node_key(node
, &node_key
, slot
);
759 BUG_ON(comp_keys(&node_key
, &cpukey
) <= 0);
761 if (slot
< nritems
- 1) {
762 btrfs_node_key_to_cpu(node
, &cpukey
, slot
+ 1);
763 btrfs_node_key(node
, &node_key
, slot
);
764 BUG_ON(comp_keys(&node_key
, &cpukey
) >= 0);
/*
 * extra checking to make sure all the items in a leaf are
 * well formed and in the proper order
 */
773 static int check_leaf(struct btrfs_root
*root
, struct btrfs_path
*path
,
776 struct extent_buffer
*leaf
= path
->nodes
[level
];
777 struct extent_buffer
*parent
= NULL
;
779 struct btrfs_key cpukey
;
780 struct btrfs_disk_key parent_key
;
781 struct btrfs_disk_key leaf_key
;
782 int slot
= path
->slots
[0];
784 u32 nritems
= btrfs_header_nritems(leaf
);
786 if (path
->nodes
[level
+ 1])
787 parent
= path
->nodes
[level
+ 1];
793 parent_slot
= path
->slots
[level
+ 1];
794 btrfs_node_key(parent
, &parent_key
, parent_slot
);
795 btrfs_item_key(leaf
, &leaf_key
, 0);
797 BUG_ON(memcmp(&parent_key
, &leaf_key
,
798 sizeof(struct btrfs_disk_key
)));
799 BUG_ON(btrfs_node_blockptr(parent
, parent_slot
) !=
800 btrfs_header_bytenr(leaf
));
802 if (slot
!= 0 && slot
< nritems
- 1) {
803 btrfs_item_key(leaf
, &leaf_key
, slot
);
804 btrfs_item_key_to_cpu(leaf
, &cpukey
, slot
- 1);
805 if (comp_keys(&leaf_key
, &cpukey
) <= 0) {
806 btrfs_print_leaf(root
, leaf
);
807 printk(KERN_CRIT
"slot %d offset bad key\n", slot
);
810 if (btrfs_item_offset_nr(leaf
, slot
- 1) !=
811 btrfs_item_end_nr(leaf
, slot
)) {
812 btrfs_print_leaf(root
, leaf
);
813 printk(KERN_CRIT
"slot %d offset bad\n", slot
);
817 if (slot
< nritems
- 1) {
818 btrfs_item_key(leaf
, &leaf_key
, slot
);
819 btrfs_item_key_to_cpu(leaf
, &cpukey
, slot
+ 1);
820 BUG_ON(comp_keys(&leaf_key
, &cpukey
) >= 0);
821 if (btrfs_item_offset_nr(leaf
, slot
) !=
822 btrfs_item_end_nr(leaf
, slot
+ 1)) {
823 btrfs_print_leaf(root
, leaf
);
824 printk(KERN_CRIT
"slot %d offset bad\n", slot
);
828 BUG_ON(btrfs_item_offset_nr(leaf
, 0) +
829 btrfs_item_size_nr(leaf
, 0) != BTRFS_LEAF_DATA_SIZE(root
));
833 static noinline
int check_block(struct btrfs_root
*root
,
834 struct btrfs_path
*path
, int level
)
838 return check_leaf(root
, path
, level
);
839 return check_node(root
, path
, level
);
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
852 static noinline
int generic_bin_search(struct extent_buffer
*eb
,
854 int item_size
, struct btrfs_key
*key
,
861 struct btrfs_disk_key
*tmp
= NULL
;
862 struct btrfs_disk_key unaligned
;
863 unsigned long offset
;
864 char *map_token
= NULL
;
866 unsigned long map_start
= 0;
867 unsigned long map_len
= 0;
871 mid
= (low
+ high
) / 2;
872 offset
= p
+ mid
* item_size
;
874 if (!map_token
|| offset
< map_start
||
875 (offset
+ sizeof(struct btrfs_disk_key
)) >
876 map_start
+ map_len
) {
878 unmap_extent_buffer(eb
, map_token
, KM_USER0
);
882 err
= map_private_extent_buffer(eb
, offset
,
883 sizeof(struct btrfs_disk_key
),
885 &map_start
, &map_len
, KM_USER0
);
888 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
891 read_extent_buffer(eb
, &unaligned
,
892 offset
, sizeof(unaligned
));
897 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
900 ret
= comp_keys(tmp
, key
);
909 unmap_extent_buffer(eb
, map_token
, KM_USER0
);
915 unmap_extent_buffer(eb
, map_token
, KM_USER0
);
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
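/*
 * Example of the slot semantics (illustrative): for a node whose keys are
 * [4, 9, 17], searching for 9 returns 0 with *slot == 1; searching for 12
 * returns 1 with *slot == 2, the insertion point; searching for 99 returns
 * 1 with *slot == 3, i.e. equal to the number of items.
 */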
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
973 static noinline
int balance_level(struct btrfs_trans_handle
*trans
,
974 struct btrfs_root
*root
,
975 struct btrfs_path
*path
, int level
)
977 struct extent_buffer
*right
= NULL
;
978 struct extent_buffer
*mid
;
979 struct extent_buffer
*left
= NULL
;
980 struct extent_buffer
*parent
= NULL
;
984 int orig_slot
= path
->slots
[level
];
985 int err_on_enospc
= 0;
991 mid
= path
->nodes
[level
];
993 WARN_ON(!path
->locks
[level
]);
994 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
996 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
998 if (level
< BTRFS_MAX_LEVEL
- 1)
999 parent
= path
->nodes
[level
+ 1];
1000 pslot
= path
->slots
[level
+ 1];
1003 * deal with the case where there is only one pointer in the root
1004 * by promoting the node below to a root
1007 struct extent_buffer
*child
;
1009 if (btrfs_header_nritems(mid
) != 1)
1012 /* promote the child to a root */
1013 child
= read_node_slot(root
, mid
, 0);
1015 btrfs_tree_lock(child
);
1016 btrfs_set_lock_blocking(child
);
1017 ret
= btrfs_cow_block(trans
, root
, child
, mid
, 0, &child
);
1020 spin_lock(&root
->node_lock
);
1022 spin_unlock(&root
->node_lock
);
1024 add_root_to_dirty_list(root
);
1025 btrfs_tree_unlock(child
);
1027 path
->locks
[level
] = 0;
1028 path
->nodes
[level
] = NULL
;
1029 clean_tree_block(trans
, root
, mid
);
1030 btrfs_tree_unlock(mid
);
1031 /* once for the path */
1032 free_extent_buffer(mid
);
1033 ret
= btrfs_free_extent(trans
, root
, mid
->start
, mid
->len
,
1034 0, root
->root_key
.objectid
, level
, 1);
1035 /* once for the root ptr */
1036 free_extent_buffer(mid
);
1039 if (btrfs_header_nritems(mid
) >
1040 BTRFS_NODEPTRS_PER_BLOCK(root
) / 4)
1043 if (btrfs_header_nritems(mid
) < 2)
1046 left
= read_node_slot(root
, parent
, pslot
- 1);
1048 btrfs_tree_lock(left
);
1049 btrfs_set_lock_blocking(left
);
1050 wret
= btrfs_cow_block(trans
, root
, left
,
1051 parent
, pslot
- 1, &left
);
1057 right
= read_node_slot(root
, parent
, pslot
+ 1);
1059 btrfs_tree_lock(right
);
1060 btrfs_set_lock_blocking(right
);
1061 wret
= btrfs_cow_block(trans
, root
, right
,
1062 parent
, pslot
+ 1, &right
);
1069 /* first, try to make some room in the middle buffer */
1071 orig_slot
+= btrfs_header_nritems(left
);
1072 wret
= push_node_left(trans
, root
, left
, mid
, 1);
1075 if (btrfs_header_nritems(mid
) < 2)
1080 * then try to empty the right most buffer into the middle
1083 wret
= push_node_left(trans
, root
, mid
, right
, 1);
1084 if (wret
< 0 && wret
!= -ENOSPC
)
1086 if (btrfs_header_nritems(right
) == 0) {
1087 u64 bytenr
= right
->start
;
1088 u32 blocksize
= right
->len
;
1090 clean_tree_block(trans
, root
, right
);
1091 btrfs_tree_unlock(right
);
1092 free_extent_buffer(right
);
1094 wret
= del_ptr(trans
, root
, path
, level
+ 1, pslot
+
1098 wret
= btrfs_free_extent(trans
, root
, bytenr
,
1100 root
->root_key
.objectid
,
1105 struct btrfs_disk_key right_key
;
1106 btrfs_node_key(right
, &right_key
, 0);
1107 btrfs_set_node_key(parent
, &right_key
, pslot
+ 1);
1108 btrfs_mark_buffer_dirty(parent
);
1111 if (btrfs_header_nritems(mid
) == 1) {
1113 * we're not allowed to leave a node with one item in the
1114 * tree during a delete. A deletion from lower in the tree
1115 * could try to delete the only pointer in this node.
1116 * So, pull some keys from the left.
1117 * There has to be a left pointer at this point because
1118 * otherwise we would have pulled some pointers from the
1122 wret
= balance_node_right(trans
, root
, mid
, left
);
1128 wret
= push_node_left(trans
, root
, left
, mid
, 1);
1134 if (btrfs_header_nritems(mid
) == 0) {
1135 /* we've managed to empty the middle node, drop it */
1136 u64 bytenr
= mid
->start
;
1137 u32 blocksize
= mid
->len
;
1139 clean_tree_block(trans
, root
, mid
);
1140 btrfs_tree_unlock(mid
);
1141 free_extent_buffer(mid
);
1143 wret
= del_ptr(trans
, root
, path
, level
+ 1, pslot
);
1146 wret
= btrfs_free_extent(trans
, root
, bytenr
, blocksize
,
1147 0, root
->root_key
.objectid
,
1152 /* update the parent key to reflect our changes */
1153 struct btrfs_disk_key mid_key
;
1154 btrfs_node_key(mid
, &mid_key
, 0);
1155 btrfs_set_node_key(parent
, &mid_key
, pslot
);
1156 btrfs_mark_buffer_dirty(parent
);
1159 /* update the path */
1161 if (btrfs_header_nritems(left
) > orig_slot
) {
1162 extent_buffer_get(left
);
1163 /* left was locked after cow */
1164 path
->nodes
[level
] = left
;
1165 path
->slots
[level
+ 1] -= 1;
1166 path
->slots
[level
] = orig_slot
;
1168 btrfs_tree_unlock(mid
);
1169 free_extent_buffer(mid
);
1172 orig_slot
-= btrfs_header_nritems(left
);
1173 path
->slots
[level
] = orig_slot
;
1176 /* double check we haven't messed things up */
1177 check_block(root
, path
, level
);
1179 btrfs_node_blockptr(path
->nodes
[level
], path
->slots
[level
]))
1183 btrfs_tree_unlock(right
);
1184 free_extent_buffer(right
);
1187 if (path
->nodes
[level
] != left
)
1188 btrfs_tree_unlock(left
);
1189 free_extent_buffer(left
);
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
1198 static noinline
int push_nodes_for_insert(struct btrfs_trans_handle
*trans
,
1199 struct btrfs_root
*root
,
1200 struct btrfs_path
*path
, int level
)
1202 struct extent_buffer
*right
= NULL
;
1203 struct extent_buffer
*mid
;
1204 struct extent_buffer
*left
= NULL
;
1205 struct extent_buffer
*parent
= NULL
;
1209 int orig_slot
= path
->slots
[level
];
1215 mid
= path
->nodes
[level
];
1216 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1217 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
1219 if (level
< BTRFS_MAX_LEVEL
- 1)
1220 parent
= path
->nodes
[level
+ 1];
1221 pslot
= path
->slots
[level
+ 1];
1226 left
= read_node_slot(root
, parent
, pslot
- 1);
1228 /* first, try to make some room in the middle buffer */
1232 btrfs_tree_lock(left
);
1233 btrfs_set_lock_blocking(left
);
1235 left_nr
= btrfs_header_nritems(left
);
1236 if (left_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
1239 ret
= btrfs_cow_block(trans
, root
, left
, parent
,
1244 wret
= push_node_left(trans
, root
,
1251 struct btrfs_disk_key disk_key
;
1252 orig_slot
+= left_nr
;
1253 btrfs_node_key(mid
, &disk_key
, 0);
1254 btrfs_set_node_key(parent
, &disk_key
, pslot
);
1255 btrfs_mark_buffer_dirty(parent
);
1256 if (btrfs_header_nritems(left
) > orig_slot
) {
1257 path
->nodes
[level
] = left
;
1258 path
->slots
[level
+ 1] -= 1;
1259 path
->slots
[level
] = orig_slot
;
1260 btrfs_tree_unlock(mid
);
1261 free_extent_buffer(mid
);
1264 btrfs_header_nritems(left
);
1265 path
->slots
[level
] = orig_slot
;
1266 btrfs_tree_unlock(left
);
1267 free_extent_buffer(left
);
1271 btrfs_tree_unlock(left
);
1272 free_extent_buffer(left
);
1274 right
= read_node_slot(root
, parent
, pslot
+ 1);
1277 * then try to empty the right most buffer into the middle
1282 btrfs_tree_lock(right
);
1283 btrfs_set_lock_blocking(right
);
1285 right_nr
= btrfs_header_nritems(right
);
1286 if (right_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
1289 ret
= btrfs_cow_block(trans
, root
, right
,
1295 wret
= balance_node_right(trans
, root
,
1302 struct btrfs_disk_key disk_key
;
1304 btrfs_node_key(right
, &disk_key
, 0);
1305 btrfs_set_node_key(parent
, &disk_key
, pslot
+ 1);
1306 btrfs_mark_buffer_dirty(parent
);
1308 if (btrfs_header_nritems(mid
) <= orig_slot
) {
1309 path
->nodes
[level
] = right
;
1310 path
->slots
[level
+ 1] += 1;
1311 path
->slots
[level
] = orig_slot
-
1312 btrfs_header_nritems(mid
);
1313 btrfs_tree_unlock(mid
);
1314 free_extent_buffer(mid
);
1316 btrfs_tree_unlock(right
);
1317 free_extent_buffer(right
);
1321 btrfs_tree_unlock(right
);
1322 free_extent_buffer(right
);
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
1331 static void reada_for_search(struct btrfs_root
*root
,
1332 struct btrfs_path
*path
,
1333 int level
, int slot
, u64 objectid
)
1335 struct extent_buffer
*node
;
1336 struct btrfs_disk_key disk_key
;
1341 int direction
= path
->reada
;
1342 struct extent_buffer
*eb
;
1350 if (!path
->nodes
[level
])
1353 node
= path
->nodes
[level
];
1355 search
= btrfs_node_blockptr(node
, slot
);
1356 blocksize
= btrfs_level_size(root
, level
- 1);
1357 eb
= btrfs_find_tree_block(root
, search
, blocksize
);
1359 free_extent_buffer(eb
);
1365 nritems
= btrfs_header_nritems(node
);
1368 if (direction
< 0) {
1372 } else if (direction
> 0) {
1377 if (path
->reada
< 0 && objectid
) {
1378 btrfs_node_key(node
, &disk_key
, nr
);
1379 if (btrfs_disk_key_objectid(&disk_key
) != objectid
)
1382 search
= btrfs_node_blockptr(node
, nr
);
1383 if ((search
<= target
&& target
- search
<= 65536) ||
1384 (search
> target
&& search
- target
<= 65536)) {
1385 readahead_tree_block(root
, search
, blocksize
,
1386 btrfs_node_ptr_generation(node
, nr
));
1390 if ((nread
> 65536 || nscan
> 32))
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
1399 static noinline
int reada_for_balance(struct btrfs_root
*root
,
1400 struct btrfs_path
*path
, int level
)
1404 struct extent_buffer
*parent
;
1405 struct extent_buffer
*eb
;
1412 parent
= path
->nodes
[level
+ 1];
1416 nritems
= btrfs_header_nritems(parent
);
1417 slot
= path
->slots
[level
+ 1];
1418 blocksize
= btrfs_level_size(root
, level
);
1421 block1
= btrfs_node_blockptr(parent
, slot
- 1);
1422 gen
= btrfs_node_ptr_generation(parent
, slot
- 1);
1423 eb
= btrfs_find_tree_block(root
, block1
, blocksize
);
1424 if (eb
&& btrfs_buffer_uptodate(eb
, gen
))
1426 free_extent_buffer(eb
);
1428 if (slot
+ 1 < nritems
) {
1429 block2
= btrfs_node_blockptr(parent
, slot
+ 1);
1430 gen
= btrfs_node_ptr_generation(parent
, slot
+ 1);
1431 eb
= btrfs_find_tree_block(root
, block2
, blocksize
);
1432 if (eb
&& btrfs_buffer_uptodate(eb
, gen
))
1434 free_extent_buffer(eb
);
1436 if (block1
|| block2
) {
1439 /* release the whole path */
1440 btrfs_release_path(root
, path
);
1442 /* read the blocks */
1444 readahead_tree_block(root
, block1
, blocksize
, 0);
1446 readahead_tree_block(root
, block2
, blocksize
, 0);
1449 eb
= read_tree_block(root
, block1
, blocksize
, 0);
1450 free_extent_buffer(eb
);
1453 eb
= read_tree_block(root
, block2
, blocksize
, 0);
1454 free_extent_buffer(eb
);
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
1474 static noinline
void unlock_up(struct btrfs_path
*path
, int level
,
1478 int skip_level
= level
;
1480 struct extent_buffer
*t
;
1482 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
1483 if (!path
->nodes
[i
])
1485 if (!path
->locks
[i
])
1487 if (!no_skips
&& path
->slots
[i
] == 0) {
1491 if (!no_skips
&& path
->keep_locks
) {
1494 nritems
= btrfs_header_nritems(t
);
1495 if (nritems
< 1 || path
->slots
[i
] >= nritems
- 1) {
1500 if (skip_level
< i
&& i
>= lowest_unlock
)
1504 if (i
>= lowest_unlock
&& i
> skip_level
&& path
->locks
[i
]) {
1505 btrfs_tree_unlock(t
);
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock(path->nodes[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
1546 read_block_for_search(struct btrfs_trans_handle
*trans
,
1547 struct btrfs_root
*root
, struct btrfs_path
*p
,
1548 struct extent_buffer
**eb_ret
, int level
, int slot
,
1549 struct btrfs_key
*key
)
1554 struct extent_buffer
*b
= *eb_ret
;
1555 struct extent_buffer
*tmp
;
1558 blocknr
= btrfs_node_blockptr(b
, slot
);
1559 gen
= btrfs_node_ptr_generation(b
, slot
);
1560 blocksize
= btrfs_level_size(root
, level
- 1);
1562 tmp
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
1563 if (tmp
&& btrfs_buffer_uptodate(tmp
, gen
)) {
1565 * we found an up to date block without sleeping, return
1573 * reduce lock contention at high levels
1574 * of the btree by dropping locks before
1575 * we read. Don't release the lock on the current
1576 * level because we need to walk this node to figure
1577 * out which blocks to read.
1579 btrfs_unlock_up_safe(p
, level
+ 1);
1580 btrfs_set_path_blocking(p
);
1583 free_extent_buffer(tmp
);
1585 reada_for_search(root
, p
, level
, slot
, key
->objectid
);
1587 btrfs_release_path(NULL
, p
);
1590 tmp
= read_tree_block(root
, blocknr
, blocksize
, gen
);
1593 * If the read above didn't mark this buffer up to date,
1594 * it will never end up being up to date. Set ret to EIO now
1595 * and give up so that our caller doesn't loop forever
1598 if (!btrfs_buffer_uptodate(tmp
, 0))
1600 free_extent_buffer(tmp
);
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * repeat the search.
 */
1615 setup_nodes_for_search(struct btrfs_trans_handle
*trans
,
1616 struct btrfs_root
*root
, struct btrfs_path
*p
,
1617 struct extent_buffer
*b
, int level
, int ins_len
)
1620 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
1621 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3) {
1624 sret
= reada_for_balance(root
, p
, level
);
1628 btrfs_set_path_blocking(p
);
1629 sret
= split_node(trans
, root
, p
, level
);
1630 btrfs_clear_path_blocking(p
, NULL
);
1637 b
= p
->nodes
[level
];
1638 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
1639 BTRFS_NODEPTRS_PER_BLOCK(root
) / 2) {
1642 sret
= reada_for_balance(root
, p
, level
);
1646 btrfs_set_path_blocking(p
);
1647 sret
= balance_level(trans
, root
, p
, level
);
1648 btrfs_clear_path_blocking(p
, NULL
);
1654 b
= p
->nodes
[level
];
1656 btrfs_release_path(NULL
, p
);
1659 BUG_ON(btrfs_header_nritems(b
) == 1);
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
1682 int btrfs_search_slot(struct btrfs_trans_handle
*trans
, struct btrfs_root
1683 *root
, struct btrfs_key
*key
, struct btrfs_path
*p
, int
1686 struct extent_buffer
*b
;
1691 int lowest_unlock
= 1;
1692 u8 lowest_level
= 0;
1694 lowest_level
= p
->lowest_level
;
1695 WARN_ON(lowest_level
&& ins_len
> 0);
1696 WARN_ON(p
->nodes
[0] != NULL
);
1702 if (p
->search_commit_root
) {
1703 b
= root
->commit_root
;
1704 extent_buffer_get(b
);
1705 if (!p
->skip_locking
)
1708 if (p
->skip_locking
)
1709 b
= btrfs_root_node(root
);
1711 b
= btrfs_lock_root_node(root
);
1715 level
= btrfs_header_level(b
);
1718 * setup the path here so we can release it under lock
1719 * contention with the cow code
1721 p
->nodes
[level
] = b
;
1722 if (!p
->skip_locking
)
1723 p
->locks
[level
] = 1;
1727 * if we don't really need to cow this block
1728 * then we don't want to set the path blocking,
1729 * so we test it here
1731 if (!should_cow_block(trans
, root
, b
))
1734 btrfs_set_path_blocking(p
);
1736 err
= btrfs_cow_block(trans
, root
, b
,
1737 p
->nodes
[level
+ 1],
1738 p
->slots
[level
+ 1], &b
);
1740 free_extent_buffer(b
);
1746 BUG_ON(!cow
&& ins_len
);
1747 if (level
!= btrfs_header_level(b
))
1749 level
= btrfs_header_level(b
);
1751 p
->nodes
[level
] = b
;
1752 if (!p
->skip_locking
)
1753 p
->locks
[level
] = 1;
1755 btrfs_clear_path_blocking(p
, NULL
);
1758 * we have a lock on b and as long as we aren't changing
1759 * the tree, there is no way to for the items in b to change.
1760 * It is safe to drop the lock on our parent before we
1761 * go through the expensive btree search on b.
1763 * If cow is true, then we might be changing slot zero,
1764 * which may require changing the parent. So, we can't
1765 * drop the lock until after we know which slot we're
1769 btrfs_unlock_up_safe(p
, level
+ 1);
1771 ret
= check_block(root
, p
, level
);
1777 ret
= bin_search(b
, key
, level
, &slot
);
1781 if (ret
&& slot
> 0) {
1785 p
->slots
[level
] = slot
;
1786 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
,
1794 b
= p
->nodes
[level
];
1795 slot
= p
->slots
[level
];
1797 unlock_up(p
, level
, lowest_unlock
);
1799 if (level
== lowest_level
) {
1805 err
= read_block_for_search(trans
, root
, p
,
1806 &b
, level
, slot
, key
);
1814 if (!p
->skip_locking
) {
1815 btrfs_clear_path_blocking(p
, NULL
);
1816 err
= btrfs_try_spin_lock(b
);
1819 btrfs_set_path_blocking(p
);
1821 btrfs_clear_path_blocking(p
, b
);
1825 p
->slots
[level
] = slot
;
1827 btrfs_leaf_free_space(root
, b
) < ins_len
) {
1828 btrfs_set_path_blocking(p
);
1829 err
= split_leaf(trans
, root
, key
,
1830 p
, ins_len
, ret
== 0);
1831 btrfs_clear_path_blocking(p
, NULL
);
1839 if (!p
->search_for_split
)
1840 unlock_up(p
, level
, lowest_unlock
);
1847 * we don't really know what they plan on doing with the path
1848 * from here on, so for now just mark it as blocking
1850 if (!p
->leave_spinning
)
1851 btrfs_set_path_blocking(p
);
1853 btrfs_release_path(root
, p
);
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
1867 static int fixup_low_keys(struct btrfs_trans_handle
*trans
,
1868 struct btrfs_root
*root
, struct btrfs_path
*path
,
1869 struct btrfs_disk_key
*key
, int level
)
1873 struct extent_buffer
*t
;
1875 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
1876 int tslot
= path
->slots
[i
];
1877 if (!path
->nodes
[i
])
1880 btrfs_set_node_key(t
, key
, tslot
);
1881 btrfs_mark_buffer_dirty(path
->nodes
[i
]);
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
1894 int btrfs_set_item_key_safe(struct btrfs_trans_handle
*trans
,
1895 struct btrfs_root
*root
, struct btrfs_path
*path
,
1896 struct btrfs_key
*new_key
)
1898 struct btrfs_disk_key disk_key
;
1899 struct extent_buffer
*eb
;
1902 eb
= path
->nodes
[0];
1903 slot
= path
->slots
[0];
1905 btrfs_item_key(eb
, &disk_key
, slot
- 1);
1906 if (comp_keys(&disk_key
, new_key
) >= 0)
1909 if (slot
< btrfs_header_nritems(eb
) - 1) {
1910 btrfs_item_key(eb
, &disk_key
, slot
+ 1);
1911 if (comp_keys(&disk_key
, new_key
) <= 0)
1915 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
1916 btrfs_set_item_key(eb
, &disk_key
, slot
);
1917 btrfs_mark_buffer_dirty(eb
);
1919 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
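/*
 * Worked example (illustrative): if BTRFS_NODEPTRS_PER_BLOCK(root) is, say,
 * 121 and the destination already holds 115 pointers, push_items starts at
 * 6; with src_nritems = 20 and empty == 0 it is further capped at
 * min(20 - 8, 6) = 6, so at least 8 pointers always stay in the source.
 */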
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
2051 static noinline
int insert_new_root(struct btrfs_trans_handle
*trans
,
2052 struct btrfs_root
*root
,
2053 struct btrfs_path
*path
, int level
)
2056 struct extent_buffer
*lower
;
2057 struct extent_buffer
*c
;
2058 struct extent_buffer
*old
;
2059 struct btrfs_disk_key lower_key
;
2061 BUG_ON(path
->nodes
[level
]);
2062 BUG_ON(path
->nodes
[level
-1] != root
->node
);
2064 lower
= path
->nodes
[level
-1];
2066 btrfs_item_key(lower
, &lower_key
, 0);
2068 btrfs_node_key(lower
, &lower_key
, 0);
2070 c
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
2071 root
->root_key
.objectid
, &lower_key
,
2072 level
, root
->node
->start
, 0);
2076 memset_extent_buffer(c
, 0, 0, sizeof(struct btrfs_header
));
2077 btrfs_set_header_nritems(c
, 1);
2078 btrfs_set_header_level(c
, level
);
2079 btrfs_set_header_bytenr(c
, c
->start
);
2080 btrfs_set_header_generation(c
, trans
->transid
);
2081 btrfs_set_header_backref_rev(c
, BTRFS_MIXED_BACKREF_REV
);
2082 btrfs_set_header_owner(c
, root
->root_key
.objectid
);
2084 write_extent_buffer(c
, root
->fs_info
->fsid
,
2085 (unsigned long)btrfs_header_fsid(c
),
2088 write_extent_buffer(c
, root
->fs_info
->chunk_tree_uuid
,
2089 (unsigned long)btrfs_header_chunk_tree_uuid(c
),
2092 btrfs_set_node_key(c
, &lower_key
, 0);
2093 btrfs_set_node_blockptr(c
, 0, lower
->start
);
2094 lower_gen
= btrfs_header_generation(lower
);
2095 WARN_ON(lower_gen
!= trans
->transid
);
2097 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
2099 btrfs_mark_buffer_dirty(c
);
2101 spin_lock(&root
->node_lock
);
2104 spin_unlock(&root
->node_lock
);
2106 /* the super has an extra ref to root->node */
2107 free_extent_buffer(old
);
2109 add_root_to_dirty_list(root
);
2110 extent_buffer_get(c
);
2111 path
->nodes
[level
] = c
;
2112 path
->locks
[level
] = 1;
2113 path
->slots
[level
] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
2163 static noinline
int split_node(struct btrfs_trans_handle
*trans
,
2164 struct btrfs_root
*root
,
2165 struct btrfs_path
*path
, int level
)
2167 struct extent_buffer
*c
;
2168 struct extent_buffer
*split
;
2169 struct btrfs_disk_key disk_key
;
2175 c
= path
->nodes
[level
];
2176 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
2177 if (c
== root
->node
) {
2178 /* trying to split the root, lets make a new one */
2179 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
2183 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
2184 c
= path
->nodes
[level
];
2185 if (!ret
&& btrfs_header_nritems(c
) <
2186 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3)
2192 c_nritems
= btrfs_header_nritems(c
);
2193 mid
= (c_nritems
+ 1) / 2;
2194 btrfs_node_key(c
, &disk_key
, mid
);
2196 split
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
2197 root
->root_key
.objectid
,
2198 &disk_key
, level
, c
->start
, 0);
2200 return PTR_ERR(split
);
2202 memset_extent_buffer(split
, 0, 0, sizeof(struct btrfs_header
));
2203 btrfs_set_header_level(split
, btrfs_header_level(c
));
2204 btrfs_set_header_bytenr(split
, split
->start
);
2205 btrfs_set_header_generation(split
, trans
->transid
);
2206 btrfs_set_header_backref_rev(split
, BTRFS_MIXED_BACKREF_REV
);
2207 btrfs_set_header_owner(split
, root
->root_key
.objectid
);
2208 write_extent_buffer(split
, root
->fs_info
->fsid
,
2209 (unsigned long)btrfs_header_fsid(split
),
2211 write_extent_buffer(split
, root
->fs_info
->chunk_tree_uuid
,
2212 (unsigned long)btrfs_header_chunk_tree_uuid(split
),
2216 copy_extent_buffer(split
, c
,
2217 btrfs_node_key_ptr_offset(0),
2218 btrfs_node_key_ptr_offset(mid
),
2219 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
2220 btrfs_set_header_nritems(split
, c_nritems
- mid
);
2221 btrfs_set_header_nritems(c
, mid
);
2224 btrfs_mark_buffer_dirty(c
);
2225 btrfs_mark_buffer_dirty(split
);
2227 wret
= insert_ptr(trans
, root
, path
, &disk_key
, split
->start
,
2228 path
->slots
[level
+ 1] + 1,
2233 if (path
->slots
[level
] >= mid
) {
2234 path
->slots
[level
] -= mid
;
2235 btrfs_tree_unlock(c
);
2236 free_extent_buffer(c
);
2237 path
->nodes
[level
] = split
;
2238 path
->slots
[level
+ 1] += 1;
2240 btrfs_tree_unlock(split
);
2241 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
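/*
 * Worked example (illustrative): if the leaf data area is N bytes and the
 * leaf holds 3 items whose data totals 300 bytes, leaf_space_used() is
 * 300 + 3 * sizeof(struct btrfs_item), and the free space is N minus that:
 * the gap between the last item header and the start of the packed data.
 */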
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	if (empty)
		nr = 0;
	else
		nr = 1;

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
			   BTRFS_LEAF_DATA_SIZE(root) - push_space,
			   btrfs_leaf_data(left) + leaf_data_end(root, left),
			   push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(left_nritems - push_items),
			   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}
	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path, int data_size,
			   int empty)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, data_size, empty,
				 right, free_space, left_nritems);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
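
/*
 * Illustrative sketch: split_leaf() below tries the pushes in this order
 * before allocating a new leaf.  A hypothetical caller that only needs
 * "data_size" more bytes in the current leaf could do roughly the same:
 *
 *	ret = push_leaf_right(trans, root, path, data_size, 0);
 *	if (ret > 0)
 *		ret = push_leaf_left(trans, root, path, data_size, 0);
 *	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
 *		return 0;	// the pushes freed enough room, no split
 */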
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, int right_nritems)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int slot;
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	slot = path->slots[1];

	if (empty)
		nr = right_nritems;
	else
		nr = right_nritems - 1;

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
			   leaf_data_end(root, left) - push_space,
			   btrfs_leaf_data(right) +
			   btrfs_item_offset_nr(right, push_items - 1),
			   push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);
		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(left, item);
		btrfs_set_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		push_space = push_space - btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}
	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
	if (wret)
		ret = wret;

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int data_size,
			  int empty)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, data_size,
				empty, left, free_space, right_nritems);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int copy_for_split(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *l,
				   struct extent_buffer *right,
				   int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	int ret = 0;
	int wret;
	struct btrfs_disk_key disk_key;

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(right, item);
		btrfs_set_item_offset(right, item, ioff + rt_data_off);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_set_header_nritems(l, mid);
	ret = 0;
	btrfs_item_key(right, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, right->start,
			  path->slots[1] + 1, 1);
	if (wret)
		ret = wret;

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);

	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;

	/* first try to make some room by pushing left and right */
	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
		wret = push_leaf_right(trans, root, path, data_size, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size, 0);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					split = 2;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0);
	if (IS_ERR(right)) {
		BUG_ON(1);
		return PTR_ERR(right);
	}

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			wret = insert_ptr(trans, root, path,
					  &disk_key, right->start,
					  path->slots[1] + 1, 1);
			if (wret)
				ret = wret;

			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			wret = insert_ptr(trans, root, path,
					  &disk_key,
					  right->start,
					  path->slots[1], 1);
			if (wret)
				ret = wret;
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0) {
				wret = fixup_low_keys(trans, root,
						      path, &disk_key, 1);
				if (wret)
					ret = wret;
			}
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
	BUG_ON(ret);

	if (split == 2) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return ret;
}
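
/*
 * Worked example of the split point chosen above: with nritems == 7 the
 * initial mid is (7 + 1) / 2 == 4, so items 0-3 stay in the left leaf and
 * items 4-6 move to the new right leaf.  If the insertion slot is at or past
 * mid and the right half plus data_size would still overflow
 * BTRFS_LEAF_DATA_SIZE(root), mid is pulled up to the slot itself, and in the
 * worst case a second split is taken on the next pass (num_doubles).
 */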
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be smaller enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	u32 item_size;
	struct extent_buffer *leaf;
	struct btrfs_key orig_key;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int ret = 0;
	int slot;
	u32 nritems;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;
	char *buf;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
	if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
		goto split;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	btrfs_release_path(root, path);

	path->search_for_split = 1;
	path->keep_locks = 1;

	ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
	path->search_for_split = 0;

	/* if our item isn't there or got smaller, return now */
	if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
							path->slots[0])) {
		path->keep_locks = 0;
		return -EAGAIN;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &orig_key, path,
			 sizeof(struct btrfs_item), 1);
	path->keep_locks = 0;
	BUG_ON(ret);

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

split:
	/*
	 * make sure any changes to the path from split_leaf leave it
	 * in a blocking state
	 */
	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);
	slot = path->slots[0] + 1;
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);

	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	kfree(buf);
	return ret;
}
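
/*
 * Illustrative usage sketch (hypothetical key values): split the item under
 * the path so that everything from byte 16 onward becomes a new item keyed
 * by new_key, while the original item keeps the first 16 bytes.
 *
 *	struct btrfs_key new_key = { .objectid = ino,
 *				     .type = BTRFS_EXTENT_DATA_KEY,
 *				     .offset = new_off };
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, 16);
 *
 * On return the path still points at the shortened original item and the
 * new item sits in the same leaf at the next slot.
 */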
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
int btrfs_truncate_item(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_path *path,
			u32 new_size, int from_end)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return 0;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		item = btrfs_item_nr(leaf, i);
		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff + size_diff);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
					       disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
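
/*
 * Illustrative sketch: trim the item at the current path position down to a
 * hypothetical new size of 32 bytes, chopping from the end:
 *
 *	ret = btrfs_truncate_item(trans, root, path, 32, 1);
 *
 * Passing from_end == 0 instead removes bytes from the front of the item and
 * advances the key offset by the number of bytes removed, as the else branch
 * above does.
 */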
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
int btrfs_extend_item(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *path,
		      u32 data_size)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		item = btrfs_item_nr(leaf, i);
		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}
		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff - data_size);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
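
/*
 * Illustrative sketch: grow the item under path->slots[0] by 8 bytes.  The
 * caller must have searched with enough ins_len for the leaf to have room:
 *
 *	ret = btrfs_extend_item(trans, root, path, 8);
 *
 * The new bytes appear at the end of the item and stay uninitialized until
 * the caller fills them with write_extent_buffer().
 */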
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int i;
	u32 nritems;
	u32 total_data = 0;
	u32 total_size = 0;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct btrfs_key found_key;

	for (i = 0; i < nr; i++) {
		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			break;
		}
		total_data += data_size[i];
		total_size += data_size[i] + sizeof(struct btrfs_item);
	}
	BUG_ON(nr == 0);

	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		for (i = nr; i >= 0; i--) {
			total_data -= data_size[i];
			total_size -= data_size[i] + sizeof(struct btrfs_item);
			if (total_size < btrfs_leaf_free_space(root, leaf))
				break;
		}
		nr = i;
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* figure out how many keys we can insert in here */
		total_data = data_size[0];
		for (i = 1; i < nr; i++) {
			if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
				break;
			total_data += data_size[i];
		}
		nr = i;

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	} else {
		/*
		 * this sucks but it has to be done, if we are inserting at
		 * the end of the leaf only insert 1 of the items, since we
		 * have no way of knowing whats on the next leaf and we'd have
		 * to drop our current locks to figure it out
		 */
		nr = 1;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	return ret;
}
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
static noinline_for_stack int
setup_items_for_insert(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_key *cpu_key, u32 *data_size,
		       u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	int ret;
	struct extent_buffer *leaf;
	int slot;

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	ret = 0;
	if (slot == 0) {
		struct btrfs_disk_key disk_key;
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	struct extent_buffer *leaf;
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	slot = path->slots[0];
	BUG_ON(slot < 0);

	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
				     total_data, total_size, nr);
out:
	return ret;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret = 0;
	int wret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
		if (wret)
			ret = wret;
	}
	btrfs_mark_buffer_dirty(parent);
	return ret;
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *leaf)
{
	int ret;

	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	ret = del_ptr(trans, root, path, 1, path->slots[1]);
	if (ret)
		return ret;

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
				0, root->root_key.objectid, 0, 0);
	return ret;
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff + dsize);
		}

		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			ret = btrfs_del_leaf(trans, root, path, leaf);
			BUG_ON(ret);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			wret = fixup_low_keys(trans, root, path,
					      &disk_key, 1);
			if (wret)
				ret = wret;
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1, 1);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				ret = btrfs_del_leaf(trans, root, path, leaf);
				BUG_ON(ret);
				free_extent_buffer(leaf);
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
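
/*
 * Illustrative sketch: deleting the single item at the current path position
 * is the common case; btrfs_del_item() in ctree.h is essentially this call
 * with nr == 1:
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 */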
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(root, path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = 1;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(root, path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);

		btrfs_tree_lock(cur);

		path->locks[level - 1] = 1;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1);
		btrfs_clear_path_blocking(path, NULL);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
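
/*
 * Illustrative sketch (roughly how the defrag and tree-log callers use this,
 * with hypothetical min_key/max_key/cache_only/min_trans values):
 *
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, &max_key, path,
 *					   cache_only, min_trans);
 *		if (ret)
 *			break;
 *		// process path->nodes[path->lowest_level], then advance
 *		// min_key past it and release the path before looping
 *	}
 */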
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level,
			int cache_only, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(root, path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int force_blocking = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	/*
	 * we take the blocks in an order that upsets lockdep.  Using
	 * blocking mode is the only way around it.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	force_blocking = 1;
#endif

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(root, path);

	path->keep_locks = 1;

	if (!force_blocking)
		path->leave_spinning = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock(next);
			free_extent_buffer(next);
		}

		next = c;
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(root, path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_spin_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_lock(next);
				if (!force_blocking)
					btrfs_clear_path_blocking(path, next);
			}
			if (force_blocking)
				btrfs_set_lock_blocking(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock(c);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = 1;

		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(root, path);
			goto done;
		}

		if (!path->skip_locking) {
			btrfs_assert_tree_locked(path->nodes[level]);
			ret = btrfs_try_spin_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_lock(next);
				if (!force_blocking)
					btrfs_clear_path_blocking(path, next);
			}
			if (force_blocking)
				btrfs_set_lock_blocking(next);
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
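
/*
 * Illustrative sketch: the standard forward-iteration pattern built on
 * btrfs_next_leaf().  "start_key" and "found_key" are hypothetical locals.
 *
 *	ret = btrfs_search_slot(NULL, root, &start_key, path, 0, 0);
 *	while (ret >= 0) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 *		// examine the item, then path->slots[0]++ and loop
 *	}
 */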
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)