/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (p->nodes[i] && p->locks[i])
			btrfs_set_lock_blocking(p->nodes[i]);
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * here.
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	btrfs_release_path(NULL, p);
	kmem_cache_free(btrfs_path_cachep, p);
}
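
/*
 * Example (hypothetical, not part of the original file): the usual calling
 * convention for the path API above.  A path is allocated, used for a single
 * read-only search (NULL trans, ins_len == 0, cow == 0), and then freed,
 * which also drops any locks and extent buffer references it still holds.
 */
static int __maybe_unused example_path_lookup(struct btrfs_root *root,
					      struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* ret == 0: key found at path->nodes[0], slot path->slots[0] */
	btrfs_free_path(path);
	return ret;
}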
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	spin_lock(&root->node_lock);
	eb = root->node;
	extent_buffer_get(eb);
	spin_unlock(&root->node_lock);
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);

		spin_lock(&root->node_lock);
		if (eb == root->node) {
			spin_unlock(&root->node_lock);
			break;
		}
		spin_unlock(&root->node_lock);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
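
/*
 * Example (hypothetical, illustration only): every reference taken through
 * btrfs_root_node() or btrfs_lock_root_node() must be dropped with
 * free_extent_buffer(), and a locked root must be unlocked first.
 */
static void __maybe_unused example_peek_root(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	eb = btrfs_lock_root_node(root);
	/* eb is guaranteed to stay the root only while the lock is held */
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
}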
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	u32 nritems;
	int ret = 0;
	int level;
	struct btrfs_root *new_root;

	new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
	if (!new_root)
		return -ENOMEM;

	memcpy(new_root, root, sizeof(*new_root));
	new_root->root_key.objectid = new_root_objectid;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	cow = btrfs_alloc_free_block(trans, new_root, buf->len, 0,
				     new_root_objectid, trans->transid,
				     level, buf->start, 0);
	if (IS_ERR(cow)) {
		kfree(new_root);
		return PTR_ERR(cow);
	}

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_owner(cow, new_root_objectid);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	ret = btrfs_inc_ref(trans, new_root, buf, cow, NULL);
	kfree(new_root);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	u64 parent_start;
	struct extent_buffer *cow;
	u32 nritems;
	int ret = 0;
	int level;
	int unlock_orig = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	if (parent)
		parent_start = parent->start;
	else
		parent_start = 0;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	cow = btrfs_alloc_free_block(trans, root, buf->len,
				     parent_start, root->root_key.objectid,
				     trans->transid, level,
				     search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_owner(cow, root->root_key.objectid);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (btrfs_header_generation(buf) != trans->transid) {
		u32 nr_extents;
		ret = btrfs_inc_ref(trans, root, buf, cow, &nr_extents);
		if (ret)
			return ret;

		ret = btrfs_cache_ref(trans, root, buf, nr_extents);
		WARN_ON(ret);
	} else if (btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID) {
		/*
		 * There are only two places that can drop reference to
		 * tree blocks owned by living reloc trees, one is here,
		 * the other place is btrfs_drop_subtree. In both places,
		 * we check reference count while tree block is locked.
		 * Furthermore, if reference count is one, it won't get
		 * increased by someone else.
		 */
		u32 refs;
		ret = btrfs_lookup_extent_ref(trans, root, buf->start,
					      buf->len, &refs);
		BUG_ON(ret);
		if (refs == 1) {
			ret = btrfs_update_ref(trans, root, buf, cow,
					       0, nritems);
			clean_tree_block(trans, root, buf);
		} else {
			ret = btrfs_inc_ref(trans, root, buf, cow, NULL);
		}
		BUG_ON(ret);
	} else {
		ret = btrfs_update_ref(trans, root, buf, cow, 0, nritems);
		if (ret)
			return ret;
		clean_tree_block(trans, root, buf);
	}

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_reloc_tree_cache_ref(trans, root, cow, buf->start);
		WARN_ON(ret);
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);

		spin_lock(&root->node_lock);
		root->node = cow;
		extent_buffer_get(cow);
		spin_unlock(&root->node_lock);

		if (buf != root->commit_root) {
			btrfs_free_extent(trans, root, buf->start,
					  buf->len, buf->start,
					  root->root_key.objectid,
					  btrfs_header_generation(buf),
					  level, 1);
		}
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		WARN_ON(trans->transid == 0);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		WARN_ON(btrfs_header_generation(parent) != trans->transid);
		btrfs_free_extent(trans, root, buf->start, buf->len,
				  parent_start, btrfs_header_owner(parent),
				  btrfs_header_generation(parent), level, 1);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (btrfs_header_generation(buf) == trans->transid &&
	    btrfs_header_owner(buf) == root->root_key.objectid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);
	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
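
/*
 * Worked example (illustrative numbers only): with a 4K blocksize, blocks at
 * byte offsets 1048576 and 1069056 are "close" (gap of 16K < 32K), while
 * blocks at 1048576 and 2097152 are not.
 */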
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	if (k1.objectid > k2->objectid)
		return 1;
	if (k1.objectid < k2->objectid)
		return -1;
	if (k1.type > k2->type)
		return 1;
	if (k1.type < k2->type)
		return -1;
	if (k1.offset > k2->offset)
		return 1;
	if (k1.offset < k2->offset)
		return -1;
	return 0;
}
/*
 * same as comp_keys only with two btrfs_key's
 */
static int comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
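
/*
 * Example (hypothetical helper, illustrative values): keys order first by
 * objectid, then type, then offset.  The checks below hold because
 * BTRFS_INODE_ITEM_KEY (1) sorts before BTRFS_DIR_ITEM_KEY (84).
 */
static void __maybe_unused example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256,
			       .type = BTRFS_INODE_ITEM_KEY, .offset = 0 };
	struct btrfs_key b = { .objectid = 256,
			       .type = BTRFS_DIR_ITEM_KEY, .offset = 123 };

	WARN_ON(comp_cpu_keys(&a, &b) >= 0);	/* a < b */
	WARN_ON(comp_cpu_keys(&b, &a) <= 0);	/* b > a */
}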
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		if (!parent->map_token) {
			map_extent_buffer(parent,
					btrfs_node_key_ptr_offset(i),
					sizeof(struct btrfs_key_ptr),
					&parent->map_token, &parent->kaddr,
					&parent->map_start, &parent->map_len,
					KM_USER1);
		}
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}
		if (parent->map_token) {
			unmap_extent_buffer(parent, parent->map_token,
					    KM_USER1);
			parent->map_token = NULL;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	if (parent->map_token) {
		unmap_extent_buffer(parent, parent->map_token,
				    KM_USER1);
		parent->map_token = NULL;
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
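
/*
 * Worked example (illustrative numbers): item data is packed at the end of
 * the leaf, so if a single item with 100 bytes of data has been inserted,
 * leaf_data_end() returns BTRFS_LEAF_DATA_SIZE(root) - 100; with no items it
 * returns BTRFS_LEAF_DATA_SIZE(root) itself.
 */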
/*
 * extra debugging checks to make sure all the items in a node are
 * well formed and in the proper order
 */
static int check_node(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *parent = NULL;
	struct extent_buffer *node = path->nodes[level];
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key node_key;
	int parent_slot;
	int slot;
	struct btrfs_key cpukey;
	u32 nritems = btrfs_header_nritems(node);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	slot = path->slots[level];
	BUG_ON(nritems == 0);
	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_node_key(node, &node_key, 0);
		BUG_ON(memcmp(&parent_key, &node_key,
			      sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(node));
	}
	BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != 0) {
		btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
	}
	if (slot < nritems - 1) {
		btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
	}
	return 0;
}
/*
 * extra checking to make sure all the items in a leaf are
 * well formed and in the proper order
 */
static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *leaf = path->nodes[level];
	struct extent_buffer *parent = NULL;
	int parent_slot;
	struct btrfs_key cpukey;
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key leaf_key;
	int slot = path->slots[0];

	u32 nritems = btrfs_header_nritems(leaf);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	if (nritems == 0)
		return 0;

	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_item_key(leaf, &leaf_key, 0);

		BUG_ON(memcmp(&parent_key, &leaf_key,
		       sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(leaf));
	}
	if (slot != 0 && slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
		if (comp_keys(&leaf_key, &cpukey) <= 0) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad key\n", slot);
			BUG_ON(1);
		}
		if (btrfs_item_offset_nr(leaf, slot - 1) !=
		       btrfs_item_end_nr(leaf, slot)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	if (slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
		BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	BUG_ON(btrfs_item_offset_nr(leaf, 0) +
	       btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
	return 0;
}
static noinline int check_block(struct btrfs_root *root,
				struct btrfs_path *path, int level)
{
	if (level == 0)
		return check_leaf(root, path, level);
	return check_node(root, path, level);
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *map_token = NULL;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!map_token || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
			if (map_token) {
				unmap_extent_buffer(eb, map_token, KM_USER0);
				map_token = NULL;
			}

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			if (map_token)
				unmap_extent_buffer(eb, map_token, KM_USER0);
			return 0;
		}
	}
	*slot = low;
	if (map_token)
		unmap_extent_buffer(eb, map_token, KM_USER0);
	return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
}
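
/*
 * Example (illustrative only): for a block holding keys {10, 20, 30},
 * searching for 20 returns 0 with *slot == 1; searching for 25 returns 1
 * with *slot == 2, the slot where 25 would be inserted; searching for 40
 * returns 1 with *slot == 3 (== nritems).
 */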
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	int err_on_enospc = 0;
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(!path->locks[level]);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		BUG_ON(ret);

		spin_lock(&root->node_lock);
		root->node = child;
		spin_unlock(&root->node_lock);

		ret = btrfs_update_extent_ref(trans, root, child->start,
					      child->len,
					      mid->start, child->start,
					      root->root_key.objectid,
					      trans->transid, level - 1);
		BUG_ON(ret);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);
		ret = btrfs_free_extent(trans, root, mid->start, mid->len,
					mid->start, root->root_key.objectid,
					btrfs_header_generation(mid),
					level, 1);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return ret;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	if (trans->transaction->delayed_refs.flushing &&
	    btrfs_header_nritems(mid) > 2)
		return 0;

	if (btrfs_header_nritems(mid) < 2)
		err_on_enospc = 1;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		if (btrfs_header_nritems(mid) < 2)
			err_on_enospc = 1;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			u64 bytenr = right->start;
			u64 generation = btrfs_header_generation(parent);
			u32 blocksize = right->len;

			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			free_extent_buffer(right);
			right = NULL;
			wret = del_ptr(trans, root, path, level + 1, pslot +
				       1);
			if (wret)
				ret = wret;
			wret = btrfs_free_extent(trans, root, bytenr,
						 blocksize, parent->start,
						 btrfs_header_owner(parent),
						 generation, level, 1);
			if (wret)
				ret = wret;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		/* we've managed to empty the middle node, drop it */
		u64 root_gen = btrfs_header_generation(parent);
		u64 bytenr = mid->start;
		u32 blocksize = mid->len;

		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		free_extent_buffer(mid);
		mid = NULL;
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		wret = btrfs_free_extent(trans, root, bytenr, blocksize,
					 parent->start,
					 btrfs_header_owner(parent),
					 root_gen, level, 1);
		if (wret)
			ret = wret;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	check_block(root, path, level);
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;
	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			readahead_tree_block(root, search, blocksize,
				     btrfs_node_ptr_generation(node, nr));
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(root, path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock(t);
			path->locks[i] = 0;
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks || path->lowest_level)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock(path->nodes[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
		/*
		 * we found an up to date block without sleeping, return
		 * right away
		 */
		*eb_ret = tmp;
		return 0;
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	if (tmp)
		free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(NULL, p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, gen);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len)
{
	int ret;
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 4) {
		int sret;

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(NULL, p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0)
		lowest_unlock = 2;

again:
	if (p->skip_locking)
		b = btrfs_root_node(root);
	else
		b = btrfs_lock_root_node(root);

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		if (cow) {
			int wret;

			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (btrfs_header_generation(b) == trans->transid &&
			    btrfs_header_owner(b) == root->root_key.objectid &&
			    !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
				goto cow_done;
			}
			btrfs_set_path_blocking(p);

			wret = btrfs_cow_block(trans, root, b,
					       p->nodes[level + 1],
					       p->slots[level + 1], &b);
			if (wret) {
				free_extent_buffer(b);
				ret = wret;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);
		if (level != btrfs_header_level(b))
			WARN_ON(1);
		level = btrfs_header_level(b);

		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		btrfs_clear_path_blocking(p, NULL);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = check_block(root, p, level);
		if (ret) {
			ret = -1;
			goto done;
		}

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			if (ret && slot > 0)
				slot -= 1;
			p->slots[level] = slot;
			ret = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len);
			if (ret == -EAGAIN)
				goto again;
			else if (ret)
				goto done;
			b = p->nodes[level];
			slot = p->slots[level];

			unlock_up(p, level, lowest_unlock);

			/* this is only true while dropping a snapshot */
			if (level == lowest_level) {
				ret = 0;
				goto done;
			}

			ret = read_block_for_search(trans, root, p,
						    &b, level, slot, key);
			if (ret == -EAGAIN)
				goto again;

			if (ret == -EIO)
				goto done;

			if (!p->skip_locking) {
				int lret;

				btrfs_clear_path_blocking(p, NULL);
				lret = btrfs_try_spin_lock(b);

				if (!lret) {
					btrfs_set_path_blocking(p);
					btrfs_tree_lock(b);
					btrfs_clear_path_blocking(p, b);
				}
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				int sret;

				btrfs_set_path_blocking(p);
				sret = split_leaf(trans, root, key,
						  p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL);

				BUG_ON(sret > 0);
				if (sret) {
					ret = sret;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(root, p);
	return ret;
}
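
/*
 * Example (hypothetical wrapper, not in the original file): a writing caller
 * passes a transaction handle, asks for enough leaf space for the new item
 * and sets cow so every block on the path is made writable first.
 */
static int __maybe_unused example_insert_search(struct btrfs_trans_handle *trans,
						struct btrfs_root *root,
						struct btrfs_key *key,
						struct btrfs_path *path,
						u32 data_size)
{
	/* ins_len counts the item header as well as the data bytes */
	int ins_len = sizeof(struct btrfs_item) + data_size;

	return btrfs_search_slot(trans, root, key, path, ins_len, 1);
}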
int btrfs_merge_path(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_key *node_keys,
		     u64 *nodes, int lowest_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 bytenr;
	u64 generation;
	u32 blocksize;
	int level;
	int slot;
	int key_match;
	int ret;

	eb = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb);
	BUG_ON(ret);

	btrfs_set_lock_blocking(eb);

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		if (level == 0 || level <= lowest_level)
			break;

		ret = bin_search(parent, &node_keys[lowest_level], level,
				 &slot);
		if (ret && slot > 0)
			slot--;

		bytenr = btrfs_node_blockptr(parent, slot);
		if (nodes[level - 1] == bytenr)
			break;

		blocksize = btrfs_level_size(root, level - 1);
		generation = btrfs_node_ptr_generation(parent, slot);
		btrfs_node_key_to_cpu(eb, &key, slot);
		key_match = !memcmp(&key, &node_keys[level - 1], sizeof(key));

		if (generation == trans->transid) {
			eb = read_tree_block(root, bytenr, blocksize,
					     generation);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
		}

		/*
		 * if node keys match and node pointer hasn't been modified
		 * in the running transaction, we can merge the path. for
		 * blocks owned by reloc trees, the node pointer check is
		 * skipped, this is because these blocks are fully controlled
		 * by the space balance code, no one else can modify them.
		 */
		if (!nodes[level - 1] || !key_match ||
		    (generation == trans->transid &&
		     btrfs_header_owner(eb) != BTRFS_TREE_RELOC_OBJECTID)) {
			if (level == 1 || level == lowest_level + 1) {
				if (generation == trans->transid) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
				}
				break;
			}

			if (generation != trans->transid) {
				eb = read_tree_block(root, bytenr, blocksize,
						generation);
				btrfs_tree_lock(eb);
				btrfs_set_lock_blocking(eb);
			}

			ret = btrfs_cow_block(trans, root, eb, parent, slot,
					      &eb);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				if (!nodes[level - 1]) {
					nodes[level - 1] = eb->start;
					memcpy(&node_keys[level - 1], &key,
					       sizeof(node_keys[0]));
				} else {
					WARN_ON(1);
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			parent = eb;
			continue;
		}

		btrfs_set_node_blockptr(parent, slot, nodes[level - 1]);
		btrfs_set_node_ptr_generation(parent, slot, trans->transid);
		btrfs_mark_buffer_dirty(parent);

		ret = btrfs_inc_extent_ref(trans, root,
					nodes[level - 1],
					blocksize, parent->start,
					btrfs_header_owner(parent),
					btrfs_header_generation(parent),
					level - 1);
		BUG_ON(ret);

		/*
		 * If the block was created in the running transaction,
		 * it's possible this is the last reference to it, so we
		 * should drop the subtree.
		 */
		if (generation == trans->transid) {
			ret = btrfs_drop_subtree(trans, root, eb, parent);
			BUG_ON(ret);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
		} else {
			ret = btrfs_free_extent(trans, root, bytenr,
						blocksize, parent->start,
						btrfs_header_owner(parent),
						btrfs_header_generation(parent),
						level - 1, 1);
			BUG_ON(ret);
		}
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return 0;
}
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (comp_keys(&disk_key, new_key) >= 0)
			return -1;
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (comp_keys(&disk_key, new_key) <= 0)
			return -1;
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
	return 0;
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	ret = btrfs_update_ref(trans, root, src, dst, dst_nritems, push_items);
	BUG_ON(ret);

	return ret;
}
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	ret = btrfs_update_ref(trans, root, src, dst, 0, push_items);
	BUG_ON(ret);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;
	int ret;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, trans->transid,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	memset_extent_buffer(c, 0, 0, root->nodesize);
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	spin_lock(&root->node_lock);
	old = root->node;
	root->node = c;
	spin_unlock(&root->node_lock);

	ret = btrfs_update_extent_ref(trans, root, lower->start,
				      lower->len, lower->start, c->start,
				      root->root_key.objectid,
				      trans->transid, level - 1);
	BUG_ON(ret);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = 1;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	int wret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else if (!trans->transaction->delayed_refs.flushing) {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);

	split = btrfs_alloc_free_block(trans, root, root->nodesize,
					path->nodes[level + 1]->start,
					root->root_key.objectid,
					trans->transid, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	btrfs_set_header_flags(split, btrfs_header_flags(c));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_owner(split, root->root_key.objectid);
	btrfs_set_header_flags(split, 0);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	mid = (c_nritems + 1) / 2;

	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	btrfs_node_key(split, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, split->start,
			  path->slots[level + 1] + 1,
			  level + 1);
	if (wret)
		ret = wret;

	ret = btrfs_update_ref(trans, root, c, split, 0, c_nritems - mid);
	BUG_ON(ret);

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;
	int ret;

	if (empty)
		nr = 0;
	else
		nr = 1;

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}
	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	btrfs_mark_buffer_dirty(right);

	ret = btrfs_update_ref(trans, root, left, right, 0, push_items);
	BUG_ON(ret);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path, int data_size,
			   int empty)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, data_size, empty,
				 right, free_space, left_nritems);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, int right_nritems)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int slot;
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	slot = path->slots[1];

	if (empty)
		nr = right_nritems;
	else
		nr = right_nritems - 1;

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);
		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(left, item);
		btrfs_set_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		push_space = push_space - btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}
	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);

	ret = btrfs_update_ref(trans, root, right, left,
			       old_left_nritems, push_items);
	BUG_ON(ret);

	btrfs_item_key(right, &disk_key, 0);
	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
	if (wret)
		ret = wret;

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}

/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int data_size,
			  int empty)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, data_size,
				empty, left, free_space, right_nritems);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}

/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int copy_for_split(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *l,
				   struct extent_buffer *right,
				   int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	int ret = 0;
	int wret;
	struct btrfs_disk_key disk_key;

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(right, item);
		btrfs_set_item_offset(right, item, ioff + rt_data_off);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_set_header_nritems(l, mid);
	ret = 0;
	btrfs_item_key(right, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, right->start,
			  path->slots[1] + 1, 1);
	if (wret)
		ret = wret;

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	ret = btrfs_update_ref(trans, root, l, right, 0, nritems);
	BUG_ON(ret);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);

	return ret;
}

/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int double_split;
	int num_doubles = 0;

	/* first try to make some room by pushing left and right */
	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY &&
	    !trans->transaction->delayed_refs.flushing) {
		wret = push_leaf_right(trans, root, path, data_size, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size, 0);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	double_split = 0;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	right = btrfs_alloc_free_block(trans, root, root->leafsize,
				       path->nodes[1]->start,
				       root->root_key.objectid,
				       trans->transid, 0, l->start, 0);
	if (IS_ERR(right)) {
		BUG_ON(1);
		return PTR_ERR(right);
	}

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				struct btrfs_disk_key disk_key;

				btrfs_cpu_key_to_disk(&disk_key, ins_key);
				btrfs_set_header_nritems(right, 0);
				wret = insert_ptr(trans, root, path,
						  &disk_key, right->start,
						  path->slots[1] + 1, 1);
				if (wret)
					ret = wret;

				btrfs_tree_unlock(path->nodes[0]);
				free_extent_buffer(path->nodes[0]);
				path->nodes[0] = right;
				path->slots[0] = 0;
				path->slots[1] += 1;
				btrfs_mark_buffer_dirty(right);
				return ret;
			}
			mid = slot;
			if (mid != nritems &&
			    leaf_space_used(l, mid, nritems - mid) +
			    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
				double_split = 1;
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				struct btrfs_disk_key disk_key;

				btrfs_cpu_key_to_disk(&disk_key, ins_key);
				btrfs_set_header_nritems(right, 0);
				wret = insert_ptr(trans, root, path,
						  &disk_key, right->start,
						  path->slots[1], 1);
				if (wret)
					ret = wret;
				btrfs_tree_unlock(path->nodes[0]);
				free_extent_buffer(path->nodes[0]);
				path->nodes[0] = right;
				path->slots[0] = 0;
				if (path->slots[1] == 0) {
					wret = fixup_low_keys(trans, root,
							path, &disk_key, 1);
					if (wret)
						ret = wret;
				}
				btrfs_mark_buffer_dirty(right);
				return ret;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					double_split = 1;
				}
			}
		}
	}

	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
	BUG_ON(ret);

	if (double_split) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return ret;
}

/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	u32 item_size;
	struct extent_buffer *leaf;
	struct btrfs_key orig_key;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int ret = 0;
	int slot;
	u32 nritems;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;
	char *buf;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
	if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
		goto split;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	btrfs_release_path(root, path);

	path->search_for_split = 1;
	path->keep_locks = 1;

	ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
	path->search_for_split = 0;

	/* if our item isn't there or got smaller, return now */
	if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
							path->slots[0])) {
		path->keep_locks = 0;
		return -EAGAIN;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &orig_key, path,
			 sizeof(struct btrfs_item), 1);
	path->keep_locks = 0;
	BUG_ON(ret);

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

split:
	/*
	 * make sure any changes to the path from split_leaf leave it
	 * in a blocking state
	 */
	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);
	slot = path->slots[0] + 1;
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);

	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	kfree(buf);
	return ret;
}

/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
int btrfs_truncate_item(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_path *path,
			u32 new_size, int from_end)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return 0;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff + size_diff);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
					       disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}

/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
int btrfs_extend_item(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *path,
		      u32 data_size)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}
		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff - data_size);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int i;
	u32 nritems;
	u32 total_data = 0;
	u32 total_size = 0;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct btrfs_key found_key;

	for (i = 0; i < nr; i++) {
		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
		    BTRFS_LEAF_DATA_SIZE(root))
			break;
		total_data += data_size[i];
		total_size += data_size[i] + sizeof(struct btrfs_item);
	}
	nr = i;

	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		for (i = nr; i >= 0; i--) {
			total_data -= data_size[i];
			total_size -= data_size[i] + sizeof(struct btrfs_item);
			if (total_size < btrfs_leaf_free_space(root, leaf))
				break;
		}
		nr = i;
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* figure out how many keys we can insert in here */
		total_data = data_size[0];
		for (i = 1; i < nr; i++) {
			if (comp_cpu_keys(&found_key, cpu_key + i) <= 0)
				break;
			total_data += data_size[i];
		}
		nr = i;

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	} else {
		/*
		 * this sucks but it has to be done, if we are inserting at
		 * the end of the leaf only insert 1 of the items, since we
		 * have no way of knowing what's on the next leaf and we'd
		 * have to drop our current locks to figure it out
		 */
		nr = 1;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	if (!ret)
		ret = nr;
	return ret;
}

/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
static noinline_for_stack int
setup_items_for_insert(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_key *cpu_key, u32 *data_size,
		       u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	int ret;
	struct extent_buffer *leaf;
	int slot;

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);

	ret = 0;
	if (slot == 0) {
		struct btrfs_disk_key disk_key;
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	struct extent_buffer *leaf;
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	slot = path->slots[0];
	BUG_ON(slot < 0);

	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
				     total_data, total_size, nr);
out:
	return ret;
}

/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret = 0;
	int wret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
		if (wret)
			ret = wret;
	}
	btrfs_mark_buffer_dirty(parent);
	return ret;
}

/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].  bytenr is the node block pointer, but since the callers
 * already know it, it is faster to have them pass it down than to
 * read it out of the node again.
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 bytenr)
{
	int ret;
	u64 root_gen = btrfs_header_generation(path->nodes[1]);
	u64 parent_start = path->nodes[1]->start;
	u64 parent_owner = btrfs_header_owner(path->nodes[1]);

	ret = del_ptr(trans, root, path, 1, path->slots[1]);
	if (ret)
		return ret;

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	ret = btrfs_free_extent(trans, root, bytenr,
				btrfs_level_size(root, 0),
				parent_start, parent_owner,
				root_gen, 0, 1);
	return ret;
}

/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff + dsize);
		}

		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			ret = btrfs_del_leaf(trans, root, path, leaf->start);
			BUG_ON(ret);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			wret = fixup_low_keys(trans, root, path,
					      &disk_key, 1);
			if (wret)
				ret = wret;
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 4 &&
		    !trans->transaction->delayed_refs.flushing) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1, 1);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				ret = btrfs_del_leaf(trans, root, path,
						     leaf->start);
				BUG_ON(ret);
				free_extent_buffer(leaf);
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}

/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(root, path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = 1;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(root, path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);

		btrfs_tree_lock(cur);

		path->locks[level - 1] = 1;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1);
		btrfs_clear_path_blocking(path, NULL);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}

/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			int cache_only, u64 min_trans)
{
	int level = lowest_level;
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL)
				return 1;
			continue;
		}
		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int force_blocking = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	/*
	 * we take the blocks in an order that upsets lockdep.  Using
	 * blocking mode is the only way around it.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	force_blocking = 1;
#endif

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(root, path);

	path->keep_locks = 1;

	if (!force_blocking)
		path->leave_spinning = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock(next);
			free_extent_buffer(next);
		}

		next = c;
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(root, path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_spin_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_lock(next);
				if (!force_blocking)
					btrfs_clear_path_blocking(path, next);
			}
			if (force_blocking)
				btrfs_set_lock_blocking(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock(c);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = 1;

		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(root, path);
			goto done;
		}

		if (!path->skip_locking) {
			btrfs_assert_tree_locked(path->nodes[level]);
			ret = btrfs_try_spin_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_lock(next);
				if (!force_blocking)
					btrfs_clear_path_blocking(path, next);
			}
			if (force_blocking)
				btrfs_set_lock_blocking(next);
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type == type)
			return 0;
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}