/*
 * btree.c - NILFS B-tree.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pagevec.h>
static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
	struct nilfs_btree_path *path;
	int level = NILFS_BTREE_LEVEL_DATA;

	path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
	if (path == NULL)
		goto out;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
		path[level].bp_bh = NULL;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index = 0;
		path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_op = NULL;
	}

out:
	return path;
}
static void nilfs_btree_free_path(struct nilfs_btree_path *path)
{
	int level = NILFS_BTREE_LEVEL_DATA;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++)
		brelse(path[level].bp_bh);

	kmem_cache_free(nilfs_btree_path_cache, path);
}
/*
 * B-tree node operations
 */
static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
				     __u64 ptr, struct buffer_head **bhp)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh;

	bh = nilfs_btnode_create_block(btnc, ptr);
	if (!bh)
		return -ENOMEM;

	set_buffer_nilfs_volatile(bh);
	*bhp = bh;
	return 0;
}
static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
{
	return node->bn_flags;
}

static void
nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
{
	node->bn_flags = flags;
}

static int nilfs_btree_node_root(const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
}

static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
{
	return node->bn_level;
}

static void
nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
{
	node->bn_level = level;
}

static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
{
	return le16_to_cpu(node->bn_nchildren);
}

static void
nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
{
	node->bn_nchildren = cpu_to_le16(nchildren);
}

static int nilfs_btree_node_size(const struct nilfs_bmap *btree)
{
	return 1 << btree->b_inode->i_blkbits;
}

static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)
{
	return btree->b_nchildren_per_block;
}
static __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
{
	return (__le64 *)((char *)(node + 1) +
			  (nilfs_btree_node_root(node) ?
			   0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}

static __le64 *
nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax)
{
	return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax);
}
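
/*
 * Node memory layout, as implied by the two helpers above: a node (whether
 * it is the root area embedded in the bmap or a node block) starts with a
 * struct nilfs_btree_node header, followed by an array of up to ncmax
 * 64-bit little-endian keys and then an array of up to ncmax 64-bit
 * little-endian pointers.  Non-root nodes keep
 * NILFS_BTREE_NODE_EXTRA_PAD_SIZE bytes of padding between the header and
 * the key array; the root omits it.
 */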
static __u64
nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
{
	return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
}

static void
nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
{
	*(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
}

static __u64
nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index,
			 int ncmax)
{
	return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index));
}

static void
nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr,
			 int ncmax)
{
	*(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr);
}
static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags,
				  int level, int nchildren, int ncmax,
				  const __u64 *keys, const __u64 *ptrs)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int i;

	nilfs_btree_node_set_flags(node, flags);
	nilfs_btree_node_set_level(node, level);
	nilfs_btree_node_set_nchildren(node, nchildren);

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	for (i = 0; i < nchildren; i++) {
		dkeys[i] = cpu_to_le64(keys[i]);
		dptrs[i] = cpu_to_le64(ptrs[i]);
	}
}
/* Assume the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_left(struct nilfs_btree_node *left,
				       struct nilfs_btree_node *right,
				       int n, int lncmax, int rncmax)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, lncmax);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, rncmax);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
	memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
	memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
	memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));

	lnchildren += n;
	rnchildren -= n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}
/* Assume that the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_right(struct nilfs_btree_node *left,
					struct nilfs_btree_node *right,
					int n, int lncmax, int rncmax)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, lncmax);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, rncmax);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
	memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
	memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
	memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));

	lnchildren -= n;
	rnchildren += n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}
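
/*
 * nilfs_btree_node_move_left()/_move_right() shift the first or last n
 * key/pointer pairs between two adjacent sibling nodes and adjust both
 * child counts.  They are the primitives used further below for carrying
 * entries between siblings, splitting a full node, and growing or
 * shrinking the tree at the root.
 */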
/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index,
				    __u64 key, __u64 ptr, int ncmax)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (index < nchildren) {
		memmove(dkeys + index + 1, dkeys + index,
			(nchildren - index) * sizeof(*dkeys));
		memmove(dptrs + index + 1, dptrs + index,
			(nchildren - index) * sizeof(*dptrs));
	}
	dkeys[index] = cpu_to_le64(key);
	dptrs[index] = cpu_to_le64(ptr);
	nchildren++;
	nilfs_btree_node_set_nchildren(node, nchildren);
}
/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index,
				    __u64 *keyp, __u64 *ptrp, int ncmax)
{
	__u64 key;
	__u64 ptr;
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	key = le64_to_cpu(dkeys[index]);
	ptr = le64_to_cpu(dptrs[index]);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (keyp != NULL)
		*keyp = key;
	if (ptrp != NULL)
		*ptrp = ptr;

	if (index < nchildren - 1) {
		memmove(dkeys + index, dkeys + index + 1,
			(nchildren - index - 1) * sizeof(*dkeys));
		memmove(dptrs + index, dptrs + index + 1,
			(nchildren - index - 1) * sizeof(*dptrs));
	}
	nchildren--;
	nilfs_btree_node_set_nchildren(node, nchildren);
}
static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
				   __u64 key, int *indexp)
{
	__u64 nkey;
	int index, low, high, s;

	/* binary search */
	low = 0;
	high = nilfs_btree_node_get_nchildren(node) - 1;
	index = 0;
	s = 0;
	while (low <= high) {
		index = (low + high) / 2;
		nkey = nilfs_btree_node_get_key(node, index);
		if (nkey == key) {
			s = 0;
			goto out;
		} else if (nkey < key) {
			low = index + 1;
			s = -1;
		} else {
			high = index - 1;
			s = 1;
		}
	}

	/* adjust index */
	if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
		if (s > 0 && index > 0)
			index--;
	} else if (s < 0)
		index++;

out:
	*indexp = index;

	return s == 0;
}
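
/*
 * nilfs_btree_node_lookup() binary-searches the key array.  It returns
 * nonzero only on an exact match and always stores a slot index in
 * *indexp: for interior nodes the index is adjusted to point at the child
 * whose subtree may contain the key; for the lowest node level it is the
 * slot where the key sits or would be inserted.
 */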
/**
 * nilfs_btree_node_broken - verify consistency of btree node
 * @node: btree node block to be examined
 * @size: node size (in bytes)
 * @blocknr: block number
 *
 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
 */
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
				   size_t size, sector_t blocknr)
{
	int level, flags, nchildren;
	int ret = 0;

	level = nilfs_btree_node_get_level(node);
	flags = nilfs_btree_node_get_flags(node);
	nchildren = nilfs_btree_node_get_nchildren(node);

	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
		     level >= NILFS_BTREE_LEVEL_MAX ||
		     (flags & NILFS_BTREE_NODE_ROOT) ||
		     nchildren < 0 ||
		     nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
		printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
		       "level = %d, flags = 0x%x, nchildren = %d\n",
		       (unsigned long long)blocknr, level, flags, nchildren);
		ret = 1;
	}
	return ret;
}
int nilfs_btree_broken_node_block(struct buffer_head *bh)
{
	int ret;

	if (buffer_nilfs_checked(bh))
		return 0;

	ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
				      bh->b_size, bh->b_blocknr);
	if (likely(!ret))
		set_buffer_nilfs_checked(bh);
	return ret;
}
static struct nilfs_btree_node *
nilfs_btree_get_root(const struct nilfs_bmap *btree)
{
	return (struct nilfs_btree_node *)btree->b_u.u_data;
}

static struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}

static struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}

static int nilfs_btree_height(const struct nilfs_bmap *btree)
{
	return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
}
static struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_bmap *btree,
		     const struct nilfs_btree_path *path,
		     int level, int *ncmaxp)
{
	struct nilfs_btree_node *node;

	if (level == nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_root(btree);
		*ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX;
	} else {
		node = nilfs_btree_get_nonroot_node(path, level);
		*ncmaxp = nilfs_btree_nchildren_per_block(btree);
	}
	return node;
}
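
/*
 * The root node lives inside the bmap itself (btree->b_u.u_data) and is
 * limited to NILFS_BTREE_ROOT_NCHILDREN_MAX children, while every other
 * node occupies a full block in the btnode cache and can hold
 * b_nchildren_per_block entries.  nilfs_btree_get_node() hides that
 * difference and reports the applicable maximum through *ncmaxp.
 */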
static int nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
{
	if (unlikely(nilfs_btree_node_get_level(node) != level)) {
		dump_stack();
		printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
		       nilfs_btree_node_get_level(node), level);
		return 1;
	}
	return 0;
}
struct nilfs_btree_readahead_info {
	struct nilfs_btree_node *node;	/* parent node */
	int max_ra_blocks;		/* max nof blocks to read ahead */
	int index;			/* current index on the parent node */
	int ncmax;			/* nof children in the parent node */
};
static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				   struct buffer_head **bhp,
				   const struct nilfs_btree_readahead_info *ra)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh, *ra_bh;
	sector_t submit_ptr = 0;
	int ret;

	ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
	if (ret) {
		if (ret != -EEXIST)
			return ret;
		goto out_check;
	}

	if (ra) {
		int i, n;
		__u64 ptr2;

		/* read ahead sibling nodes */
		for (n = ra->max_ra_blocks, i = ra->index + 1;
		     n > 0 && i < ra->ncmax; n--, i++) {
			ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);

			ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
							&ra_bh, &submit_ptr);
			if (likely(!ret || ret == -EEXIST))
				brelse(ra_bh);
			else if (ret != -EBUSY)
				break;
			if (!buffer_locked(bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(bh);

out_no_wait:
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}

out_check:
	if (nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		brelse(bh);
		return -EINVAL;
	}

	*bhp = bh;
	return 0;
}
static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				 struct buffer_head **bhp)
{
	return __nilfs_btree_get_block(btree, ptr, bhp, NULL);
}
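
/*
 * When a readahead descriptor is supplied, __nilfs_btree_get_block() also
 * submits READA requests for up to max_ra_blocks sibling node blocks that
 * follow the target slot in the parent node, so a sequential descent
 * touches the device less often.  nilfs_btree_get_block() is the plain
 * variant without readahead.
 */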
static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
				 struct nilfs_btree_path *path,
				 __u64 key, __u64 *ptrp, int minlevel,
				 int readahead)
{
	struct nilfs_btree_node *node;
	struct nilfs_btree_readahead_info p, *ra;
	__u64 ptr;
	int level, index, found, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	level = nilfs_btree_node_get_level(node);
	if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
		return -ENOENT;

	found = nilfs_btree_node_lookup(node, key, &index);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	ncmax = nilfs_btree_nchildren_per_block(btree);

	while (--level >= minlevel) {
		ra = NULL;
		if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) {
			p.node = nilfs_btree_get_node(btree, path, level + 1,
						      &p.ncmax);
			p.index = index;
			p.max_ra_blocks = 7;
			ra = &p;
		}
		ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh,
					      ra);
		if (ret < 0)
			return ret;

		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		if (!found)
			found = nilfs_btree_node_lookup(node, key, &index);
		else
			index = 0;
		if (index < ncmax) {
			ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		} else {
			WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
			/* insert */
			ptr = NILFS_BMAP_INVALID_PTR;
		}
		path[level].bp_index = index;
	}
	if (!found)
		return -ENOENT;

	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
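
/*
 * nilfs_btree_do_lookup() walks from the root down to @minlevel, recording
 * at each level the buffer head and child index it visited in @path; the
 * insert, delete and propagate operations below reuse that recorded path
 * instead of searching the tree again.
 */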
static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int index, level, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	index = nilfs_btree_node_get_nchildren(node) - 1;
	if (index < 0)
		return -ENOENT;
	level = nilfs_btree_node_get_level(node);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;
	ncmax = nilfs_btree_nchildren_per_block(btree);

	for (level--; level > 0; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		index = nilfs_btree_node_get_nchildren(node) - 1;
		ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		path[level].bp_index = index;
	}

	if (keyp != NULL)
		*keyp = nilfs_btree_node_get_key(node, index);
	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
			      __u64 key, int level, __u64 *ptrp)
{
	struct nilfs_btree_path *path;
	int ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0);

	nilfs_btree_free_path(path);

	return ret;
}
static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
				     __u64 key, __u64 *ptrp, unsigned maxblocks)
{
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int level = NILFS_BTREE_LEVEL_NODE_MIN;
	int ret, cnt, index, maxlevel, ncmax;
	struct nilfs_btree_readahead_info p;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1);
	if (ret < 0)
		goto out;

	if (NILFS_BMAP_USE_VBN(btree)) {
		dat = nilfs_bmap_get_dat(btree);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto out;
		ptr = blocknr;
	}
	cnt = 1;
	if (cnt == maxblocks)
		goto end;

	maxlevel = nilfs_btree_height(btree) - 1;
	node = nilfs_btree_get_node(btree, path, level, &ncmax);
	index = path[level].bp_index + 1;
	for (;;) {
		while (index < nilfs_btree_node_get_nchildren(node)) {
			if (nilfs_btree_node_get_key(node, index) !=
			    key + cnt)
				goto end;
			ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax);
			if (dat) {
				ret = nilfs_dat_translate(dat, ptr2, &blocknr);
				if (ret < 0)
					goto out;
				ptr2 = blocknr;
			}
			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
				goto end;
			index++;
			continue;
		}
		if (level == maxlevel)
			break;

		/* look-up right sibling node */
		p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax);
		p.index = path[level + 1].bp_index + 1;
		p.max_ra_blocks = 7;
		if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
		    nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
			break;
		ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
		path[level + 1].bp_index = p.index;

		brelse(path[level].bp_bh);
		path[level].bp_bh = NULL;

		ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh,
					      &p);
		if (ret < 0)
			goto out;
		node = nilfs_btree_get_nonroot_node(path, level);
		ncmax = nilfs_btree_nchildren_per_block(btree);
		index = 0;
		path[level].bp_index = index;
	}
end:
	*ptrp = ptr;
	ret = cnt;
out:
	nilfs_btree_free_path(path);
	return ret;
}
static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 key)
{
	if (level < nilfs_btree_height(btree) - 1) {
		do {
			nilfs_btree_node_set_key(
				nilfs_btree_get_nonroot_node(path, level),
				path[level].bp_index, key);
			if (!buffer_dirty(path[level].bp_bh))
				mark_buffer_dirty(path[level].bp_bh);
		} while ((path[level].bp_index == 0) &&
			 (++level < nilfs_btree_height(btree) - 1));
	}

	/* root */
	if (level == nilfs_btree_height(btree) - 1) {
		nilfs_btree_node_set_key(nilfs_btree_get_root(btree),
					 path[level].bp_index, key);
	}
}
static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);

		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
						nilfs_btree_node_get_key(node,
									 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}
static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + lnchildren + 1) / 2 - lnchildren;
	if (n > path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index += lnchildren;
		path[level + 1].bp_index--;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= n;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + rnchildren + 1) / 2 - rnchildren;
	if (n > nchildren - path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		path[level + 1].bp_index++;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
static void nilfs_btree_split(struct nilfs_bmap *btree,
			      struct nilfs_btree_path *path,
			      int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	__u64 newkey;
	__u64 newptr;
	int nchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + 1) / 2;
	if (n > nchildren - path[level].bp_index) {
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	newkey = nilfs_btree_node_get_key(right, 0);
	newptr = path[level].bp_newreq.bpr_ptr;

	if (move) {
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		nilfs_btree_node_insert(right, path[level].bp_index,
					*keyp, *ptrp, ncblk);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
	} else {
		nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	path[level + 1].bp_index++;
}
static void nilfs_btree_grow(struct nilfs_bmap *btree,
			     struct nilfs_btree_path *path,
			     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n, ncblk;

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = nilfs_btree_node_get_nchildren(root);

	nilfs_btree_node_move_right(root, child, n,
				    NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
	nilfs_btree_node_set_level(root, level + 1);

	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

	*keyp = nilfs_btree_node_get_key(child, 0);
	*ptrp = path[level].bp_newreq.bpr_ptr;
}
static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree,
				   const struct nilfs_btree_path *path)
{
	struct nilfs_btree_node *node;
	int level, ncmax;

	if (path == NULL)
		return NILFS_BMAP_INVALID_PTR;

	/* left sibling */
	level = NILFS_BTREE_LEVEL_NODE_MIN;
	if (path[level].bp_index > 0) {
		node = nilfs_btree_get_node(btree, path, level, &ncmax);
		return nilfs_btree_node_get_ptr(node,
						path[level].bp_index - 1,
						ncmax);
	}

	/* parent */
	level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
	if (level <= nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_node(btree, path, level, &ncmax);
		return nilfs_btree_node_get_ptr(node, path[level].bp_index,
						ncmax);
	}

	return NILFS_BMAP_INVALID_PTR;
}
static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree,
				       const struct nilfs_btree_path *path,
				       __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(btree, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	ptr = nilfs_btree_find_near(btree, path);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* near */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(btree);
}
static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int *levelp, __u64 key, __u64 ptr,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ncmax, ncblk, ret;
	struct inode *dat = NULL;

	stats->bs_nblocks = 0;
	level = NILFS_BTREE_LEVEL_DATA;

	/* allocate a new ptr for data block */
	if (NILFS_BMAP_USE_VBN(btree)) {
		path[level].bp_newreq.bpr_ptr =
			nilfs_btree_find_target_v(btree, path, key);
		dat = nilfs_bmap_get_dat(btree);
	}

	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_data;

	ncblk = nilfs_btree_nchildren_per_block(btree);

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_node_get_nchildren(node) < ncblk) {
			path[level].bp_op = nilfs_btree_do_insert;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
		pindex = path[level + 1].bp_index;

		/* left sibling */
		if (pindex > 0) {
			sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* right sibling */
		if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) {
			sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* split */
		path[level].bp_newreq.bpr_ptr =
			path[level - 1].bp_newreq.bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(btree,
						   &path[level].bp_newreq, dat);
		if (ret < 0)
			goto err_out_child_node;
		ret = nilfs_btree_get_new_block(btree,
						path[level].bp_newreq.bpr_ptr,
						&bh);
		if (ret < 0)
			goto err_out_curr_node;

		stats->bs_nblocks++;

		sib = (struct nilfs_btree_node *)bh->b_data;
		nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL);
		path[level].bp_sib_bh = bh;
		path[level].bp_op = nilfs_btree_split;
	}

	/* root */
	node = nilfs_btree_get_root(btree);
	if (nilfs_btree_node_get_nchildren(node) <
	    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
		path[level].bp_op = nilfs_btree_do_insert;
		stats->bs_nblocks++;
		goto out;
	}

	/* grow */
	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_child_node;
	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
					&bh);
	if (ret < 0)
		goto err_out_curr_node;

	nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data,
			      0, level, 0, ncblk, NULL, NULL);
	path[level].bp_sib_bh = bh;
	path[level].bp_op = nilfs_btree_grow;

	level++;
	path[level].bp_op = nilfs_btree_do_insert;

	/* a newly-created node block and a data block are added */
	stats->bs_nblocks += 2;

	/* success */
out:
	*levelp = level;
	return ret;

	/* error */
err_out_curr_node:
	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
err_out_child_node:
	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
		nilfs_btnode_delete(path[level].bp_sib_bh);
		nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
	}

	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
err_out_data:
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
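
/*
 * Insertion follows a prepare/commit/abort pattern: the prepare step above
 * walks up from the leaf level, chooses one rebalancing operation per level
 * (plain insert, carry to the left or right sibling, split, or grow a new
 * root) and reserves the required block pointers; the commit step below
 * then applies the recorded bp_op callbacks bottom-up.  On failure the
 * reserved pointers are released again, so the tree is never left half
 * modified.
 */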
static void nilfs_btree_commit_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, __u64 key, __u64 ptr)
{
	struct inode *dat = NULL;
	int level;

	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
	if (NILFS_BMAP_USE_VBN(btree)) {
		nilfs_bmap_set_target_v(btree, key, ptr);
		dat = nilfs_bmap_get_dat(btree);
	}

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		nilfs_bmap_commit_alloc_ptr(btree,
					    &path[level - 1].bp_newreq, dat);
		path[level].bp_op(btree, path, level, &key, &ptr);
	}

	if (!nilfs_bmap_dirty(btree))
		nilfs_bmap_set_dirty(btree);
}
static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
{
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	int level, ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN, 0);
	if (ret != -ENOENT) {
		if (ret == 0)
			ret = -EEXIST;
		goto out;
	}

	ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_insert(btree, path, level, key, ptr);
	nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);

out:
	nilfs_btree_free_path(path);
	return ret;
}
static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);
		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}
static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = (nchildren + lnchildren) / 2 - nchildren;

	nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += n;
}
static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = (nchildren + rnchildren) / 2 - nchildren;

	nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
}
static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = nilfs_btree_node_get_nchildren(node);

	nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += nilfs_btree_node_get_nchildren(left);
}
static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = nilfs_btree_node_get_nchildren(right);

	nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);

	nilfs_btnode_delete(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level + 1].bp_index++;
}
static void nilfs_btree_shrink(struct nilfs_bmap *btree,
			       struct nilfs_btree_path *path,
			       int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_nonroot_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	nilfs_btree_node_delete(root, 0, NULL, NULL,
				NILFS_BTREE_ROOT_NCHILDREN_MAX);
	nilfs_btree_node_set_level(root, level);
	n = nilfs_btree_node_get_nchildren(child);
	nilfs_btree_node_move_left(root, child, n,
				   NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);

	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = NULL;
}
static void nilfs_btree_nop(struct nilfs_bmap *btree,
			    struct nilfs_btree_path *path,
			    int level, __u64 *keyp, __u64 *ptrp)
{
}
static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int *levelp,
				      struct nilfs_bmap_stats *stats,
				      struct inode *dat)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, dindex, level, ncmin, ncmax, ncblk, ret;

	ret = 0;
	stats->bs_nblocks = 0;
	ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
	ncblk = nilfs_btree_nchildren_per_block(btree);

	for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		path[level].bp_oldreq.bpr_ptr =
			nilfs_btree_node_get_ptr(node, dindex, ncblk);
		ret = nilfs_bmap_prepare_end_ptr(btree,
						 &path[level].bp_oldreq, dat);
		if (ret < 0)
			goto err_out_child_node;

		if (nilfs_btree_node_get_nchildren(node) > ncmin) {
			path[level].bp_op = nilfs_btree_do_delete;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
		pindex = path[level + 1].bp_index;
		dindex = pindex;

		if (pindex > 0) {
			/* left sibling */
			sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_left;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else if (pindex <
			   nilfs_btree_node_get_nchildren(parent) - 1) {
			/* right sibling */
			sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_right;
				stats->bs_nblocks++;
				/*
				 * When merging right sibling node
				 * into the current node, pointer to
				 * the right sibling node must be
				 * terminated instead.  The adjustment
				 * below is required for that.
				 */
				dindex = pindex + 1;
				/* continue; */
			}
		} else {
			/* no siblings */
			/* the only child of the root node */
			WARN_ON(level != nilfs_btree_height(btree) - 2);
			if (nilfs_btree_node_get_nchildren(node) - 1 <=
			    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
				path[level].bp_op = nilfs_btree_shrink;
				stats->bs_nblocks += 2;
				level++;
				path[level].bp_op = nilfs_btree_nop;
				goto shrink_root_child;
			} else {
				path[level].bp_op = nilfs_btree_do_delete;
				stats->bs_nblocks++;
				goto out;
			}
		}
	}

	/* child of the root node is deleted */
	path[level].bp_op = nilfs_btree_do_delete;
	stats->bs_nblocks++;

shrink_root_child:
	node = nilfs_btree_get_root(btree);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(node, dindex,
					 NILFS_BTREE_ROOT_NCHILDREN_MAX);

	ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
	if (ret < 0)
		goto err_out_child_node;

	/* success */
out:
	*levelp = level;
	return ret;

	/* error */
err_out_curr_node:
	nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
err_out_child_node:
	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
		brelse(path[level].bp_sib_bh);
		nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
	}
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
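
/*
 * Deletion mirrors the insert path: for each level that would fall below
 * ncmin children, nilfs_btree_prepare_delete() chooses either borrowing
 * entries from a sibling or concatenating with it, and arranges to shrink
 * the tree height when the root is left with a single child; the recorded
 * bp_op callbacks are then run by nilfs_btree_commit_delete().
 */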
static void nilfs_btree_commit_delete(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, struct inode *dat)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat);
		path[level].bp_op(btree, path, level, NULL, NULL);
	}

	if (!nilfs_bmap_dirty(btree))
		nilfs_bmap_set_dirty(btree);
}
static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key)
{
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	struct inode *dat;
	int level, ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN, 0);
	if (ret < 0)
		goto out;

	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;

	ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_delete(btree, path, level, dat);
	nilfs_inode_sub_blocks(btree->b_inode, stats.bs_nblocks);

out:
	nilfs_btree_free_path(path);
	return ret;
}
static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp)
{
	struct nilfs_btree_path *path;
	int ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);

	nilfs_btree_free_path(path);

	return ret;
}
static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *root, *node;
	__u64 maxkey, nextmaxkey;
	__u64 ptr;
	int nchildren, ret;

	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(root);
		if (nchildren > 1)
			return 0;
		ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
					       NILFS_BTREE_ROOT_NCHILDREN_MAX);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		return 0;
	}

	nchildren = nilfs_btree_node_get_nchildren(node);
	maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
	nextmaxkey = (nchildren > 1) ?
		nilfs_btree_node_get_key(node, nchildren - 2) : 0;
	if (bh != NULL)
		brelse(bh);

	return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
}
static int nilfs_btree_gather_data(struct nilfs_bmap *btree,
				   __u64 *keys, __u64 *ptrs, int nitems)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *root;
	__le64 *dkeys;
	__le64 *dptrs;
	__u64 ptr;
	int nchildren, ncmax, i, ret;

	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		ncmax = NILFS_BTREE_ROOT_NCHILDREN_MAX;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(root);
		WARN_ON(nchildren > 1);
		ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
					       NILFS_BTREE_ROOT_NCHILDREN_MAX);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		ncmax = nilfs_btree_nchildren_per_block(btree);
		break;
	default:
		node = NULL;
		return -EINVAL;
	}

	nchildren = nilfs_btree_node_get_nchildren(node);
	if (nchildren < nitems)
		nitems = nchildren;
	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	for (i = 0; i < nitems; i++) {
		keys[i] = le64_to_cpu(dkeys[i]);
		ptrs[i] = le64_to_cpu(dptrs[i]);
	}

	if (bh != NULL)
		brelse(bh);

	return nitems;
}
static int
nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
				       union nilfs_bmap_ptr_req *dreq,
				       union nilfs_bmap_ptr_req *nreq,
				       struct buffer_head **bhp,
				       struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct inode *dat = NULL;
	int ret;

	stats->bs_nblocks = 0;

	/* for data */
	/* cannot find near ptr */
	if (NILFS_BMAP_USE_VBN(btree)) {
		dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
		dat = nilfs_bmap_get_dat(btree);
	}

	ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
	if (ret < 0)
		return ret;

	*bhp = NULL;
	stats->bs_nblocks++;
	if (nreq != NULL) {
		nreq->bpr_ptr = dreq->bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
		if (ret < 0)
			goto err_out_dreq;

		ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
		if (ret < 0)
			goto err_out_nreq;

		*bhp = bh;
		stats->bs_nblocks++;
	}

	/* success */
	return 0;

	/* error */
err_out_nreq:
	nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);
err_out_dreq:
	nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);
	stats->bs_nblocks = 0;
	return ret;
}
static void
nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
				      __u64 key, __u64 ptr,
				      const __u64 *keys, const __u64 *ptrs,
				      int n,
				      union nilfs_bmap_ptr_req *dreq,
				      union nilfs_bmap_ptr_req *nreq,
				      struct buffer_head *bh)
{
	struct nilfs_btree_node *node;
	struct inode *dat;
	__u64 tmpptr;
	int ncblk;

	/* free resources */
	if (btree->b_ops->bop_clear != NULL)
		btree->b_ops->bop_clear(btree);

	/* ptr must be a pointer to a buffer head. */
	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));

	/* convert and insert */
	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
	nilfs_btree_init(btree);
	if (nreq != NULL) {
		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
		nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);

		/* create child node at level 1 */
		node = (struct nilfs_btree_node *)bh->b_data;
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
		nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
		if (!buffer_dirty(bh))
			mark_buffer_dirty(bh);
		if (!nilfs_bmap_dirty(btree))
			nilfs_bmap_set_dirty(btree);

		brelse(bh);

		/* create root node at level 2 */
		node = nilfs_btree_get_root(btree);
		tmpptr = nreq->bpr_ptr;
		nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1,
				      NILFS_BTREE_ROOT_NCHILDREN_MAX,
				      &keys[0], &tmpptr);
	} else {
		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);

		/* create root node at level 1 */
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n,
				      NILFS_BTREE_ROOT_NCHILDREN_MAX,
				      keys, ptrs);
		nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
		if (!nilfs_bmap_dirty(btree))
			nilfs_bmap_set_dirty(btree);
	}

	if (NILFS_BMAP_USE_VBN(btree))
		nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr);
}
/**
 * nilfs_btree_convert_and_insert -
 */
int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
				   __u64 key, __u64 ptr,
				   const __u64 *keys, const __u64 *ptrs, int n)
{
	struct buffer_head *bh;
	union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
	struct nilfs_bmap_stats stats;
	int ret;

	if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) {
		di = &dreq;
		ni = NULL;
	} else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
			   1 << btree->b_inode->i_blkbits)) {
		di = &dreq;
		ni = &nreq;
	} else {
		di = NULL;
		ni = NULL;
		BUG();
	}

	ret = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh,
						     &stats);
	if (ret < 0)
		return ret;
	nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n,
					      di, ni, bh);
	nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);
	return 0;
}
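
/*
 * nilfs_btree_convert_and_insert() turns a direct-mapped bmap into a
 * B-tree while inserting one more (key, ptr) pair: if everything still
 * fits into the root it builds a level-1 root in place; otherwise it
 * allocates one child node block, copies the existing entries there, and
 * builds a level-2 root pointing to it.
 */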
static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level,
				   struct buffer_head *bh)
{
	while ((++level < nilfs_btree_height(btree) - 1) &&
	       !buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);

	return 0;
}
static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
					struct nilfs_btree_path *path,
					int level, struct inode *dat)
{
	struct nilfs_btree_node *parent;
	int ncmax, ret;

	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
					 ncmax);
	path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
	ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
				       &path[level].bp_newreq.bpr_req);
	if (ret < 0)
		return ret;

	if (buffer_nilfs_node(path[level].bp_bh)) {
		path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr;
		path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
		path[level].bp_ctxt.bh = path[level].bp_bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0) {
			nilfs_dat_abort_update(dat,
					       &path[level].bp_oldreq.bpr_req,
					       &path[level].bp_newreq.bpr_req);
			return ret;
		}
	}

	return 0;
}
static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
					struct nilfs_btree_path *path,
					int level, struct inode *dat)
{
	struct nilfs_btree_node *parent;
	int ncmax;

	nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
				&path[level].bp_newreq.bpr_req,
				btree->b_ptr_type == NILFS_BMAP_PTR_VS);

	if (buffer_nilfs_node(path[level].bp_bh)) {
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		path[level].bp_bh = path[level].bp_ctxt.bh;
	}
	set_buffer_nilfs_volatile(path[level].bp_bh);

	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index,
				 path[level].bp_newreq.bpr_ptr, ncmax);
}
static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
				       struct nilfs_btree_path *path,
				       int level, struct inode *dat)
{
	nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req,
			       &path[level].bp_newreq.bpr_req);
	if (buffer_nilfs_node(path[level].bp_bh))
		nilfs_btnode_abort_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
}
static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree,
					   struct nilfs_btree_path *path,
					   int minlevel, int *maxlevelp,
					   struct inode *dat)
{
	int level, ret;

	level = minlevel;
	if (!buffer_nilfs_volatile(path[level].bp_bh)) {
		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
		if (ret < 0)
			return ret;
	}
	while ((++level < nilfs_btree_height(btree) - 1) &&
	       !buffer_dirty(path[level].bp_bh)) {

		WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
		ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
		if (ret < 0)
			goto out;
	}

	/* success */
	*maxlevelp = level - 1;
	return 0;

	/* error */
out:
	while (--level > minlevel)
		nilfs_btree_abort_update_v(btree, path, level, dat);
	if (!buffer_nilfs_volatile(path[level].bp_bh))
		nilfs_btree_abort_update_v(btree, path, level, dat);
	return ret;
}
static void nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree,
					   struct nilfs_btree_path *path,
					   int minlevel, int maxlevel,
					   struct buffer_head *bh,
					   struct inode *dat)
{
	int level;

	if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
		nilfs_btree_commit_update_v(btree, path, minlevel, dat);

	for (level = minlevel + 1; level <= maxlevel; level++)
		nilfs_btree_commit_update_v(btree, path, level, dat);
}
static int nilfs_btree_propagate_v(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level, struct buffer_head *bh)
{
	int maxlevel = 0, ret;
	struct nilfs_btree_node *parent;
	struct inode *dat = nilfs_bmap_get_dat(btree);
	__u64 ptr;
	int ncmax;

	get_bh(bh);
	path[level].bp_bh = bh;
	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
					      dat);
	if (ret < 0)
		goto out;

	if (buffer_nilfs_volatile(path[level].bp_bh)) {
		parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
		ptr = nilfs_btree_node_get_ptr(parent,
					       path[level + 1].bp_index,
					       ncmax);
		ret = nilfs_dat_mark_dirty(dat, ptr);
		if (ret < 0)
			goto out;
	}

	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);

out:
	brelse(path[level].bp_bh);
	path[level].bp_bh = NULL;
	return ret;
}
static int nilfs_btree_propagate(struct nilfs_bmap *btree,
				 struct buffer_head *bh)
{
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	__u64 key;
	int level, ret;

	WARN_ON(!buffer_dirty(bh));

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	if (buffer_nilfs_node(bh)) {
		node = (struct nilfs_btree_node *)bh->b_data;
		key = nilfs_btree_node_get_key(node, 0);
		level = nilfs_btree_node_get_level(node);
	} else {
		key = nilfs_bmap_data_get_key(btree, bh);
		level = NILFS_BTREE_LEVEL_DATA;
	}

	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
	if (ret < 0) {
		if (unlikely(ret == -ENOENT))
			printk(KERN_CRIT "%s: key = %llu, level == %d\n",
			       __func__, (unsigned long long)key, level);
		goto out;
	}

	ret = NILFS_BMAP_USE_VBN(btree) ?
		nilfs_btree_propagate_v(btree, path, level, bh) :
		nilfs_btree_propagate_p(btree, path, level, bh);

out:
	nilfs_btree_free_path(path);

	return ret;
}
static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree,
				    struct buffer_head *bh)
{
	return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree), bh->b_blocknr);
}
static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
					 struct list_head *lists,
					 struct buffer_head *bh)
{
	struct list_head *head;
	struct buffer_head *cbh;
	struct nilfs_btree_node *node, *cnode;
	__u64 key, ckey;
	int level;

	get_bh(bh);
	node = (struct nilfs_btree_node *)bh->b_data;
	key = nilfs_btree_node_get_key(node, 0);
	level = nilfs_btree_node_get_level(node);
	if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
	    level >= NILFS_BTREE_LEVEL_MAX) {
		dump_stack();
		printk(KERN_WARNING
		       "%s: invalid btree level: %d (key=%llu, ino=%lu, "
		       "blocknr=%llu)\n",
		       __func__, level, (unsigned long long)key,
		       NILFS_BMAP_I(btree)->vfs_inode.i_ino,
		       (unsigned long long)bh->b_blocknr);
		return;
	}

	list_for_each(head, &lists[level]) {
		cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
		cnode = (struct nilfs_btree_node *)cbh->b_data;
		ckey = nilfs_btree_node_get_key(cnode, 0);
		if (key < ckey)
			break;
	}
	list_add_tail(&bh->b_assoc_buffers, head);
}
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
					     struct list_head *listp)
{
	struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct list_head lists[NILFS_BTREE_LEVEL_MAX];
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	pgoff_t index = 0;
	int level, i;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		INIT_LIST_HEAD(&lists[level]);

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh))
					nilfs_btree_add_dirty_buffer(btree,
								     lists, bh);
			} while ((bh = bh->b_this_page) != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		list_splice_tail(&lists[level], listp);
}
static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	__u64 key;
	__u64 ptr;
	int ncmax, ret;

	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
				       ncmax);
	if (buffer_nilfs_node(*bh)) {
		path[level].bp_ctxt.oldkey = ptr;
		path[level].bp_ctxt.newkey = blocknr;
		path[level].bp_ctxt.bh = *bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0)
			return ret;
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(btree)->i_btnode_cache,
			&path[level].bp_ctxt);
		*bh = path[level].bp_ctxt.bh;
	}

	nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr,
				 ncmax);

	key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = level;

	return 0;
}
static int nilfs_btree_assign_v(struct nilfs_bmap *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	struct inode *dat = nilfs_bmap_get_dat(btree);
	__u64 key;
	__u64 ptr;
	union nilfs_bmap_ptr_req req;
	int ncmax, ret;

	parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
	ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
				       ncmax);
	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (ret < 0)
		return ret;
	nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);

	key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
	binfo->bi_v.bi_blkoff = cpu_to_le64(key);

	return 0;
}
static int nilfs_btree_assign(struct nilfs_bmap *btree,
			      struct buffer_head **bh,
			      sector_t blocknr,
			      union nilfs_binfo *binfo)
{
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	__u64 key;
	int level, ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	if (buffer_nilfs_node(*bh)) {
		node = (struct nilfs_btree_node *)(*bh)->b_data;
		key = nilfs_btree_node_get_key(node, 0);
		level = nilfs_btree_node_get_level(node);
	} else {
		key = nilfs_bmap_data_get_key(btree, *bh);
		level = NILFS_BTREE_LEVEL_DATA;
	}

	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		goto out;
	}

	ret = NILFS_BMAP_USE_VBN(btree) ?
		nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) :
		nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);

out:
	nilfs_btree_free_path(path);

	return ret;
}
static int nilfs_btree_assign_gc(struct nilfs_bmap *btree,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *node;
	__u64 key;
	int ret;

	ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr,
			     blocknr);
	if (ret < 0)
		return ret;

	if (buffer_nilfs_node(*bh)) {
		node = (struct nilfs_btree_node *)(*bh)->b_data;
		key = nilfs_btree_node_get_key(node, 0);
	} else
		key = nilfs_bmap_data_get_key(btree, *bh);

	/* on-disk format */
	binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
	binfo->bi_v.bi_blkoff = cpu_to_le64(key);

	return 0;
}
static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
{
	struct buffer_head *bh;
	struct nilfs_btree_path *path;
	__u64 ptr;
	int ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		goto out;
	}
	ret = nilfs_btree_get_block(btree, ptr, &bh);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		goto out;
	}

	if (!buffer_dirty(bh))
		mark_buffer_dirty(bh);
	brelse(bh);
	if (!nilfs_bmap_dirty(btree))
		nilfs_bmap_set_dirty(btree);

out:
	nilfs_btree_free_path(path);
	return ret;
}
static const struct nilfs_bmap_operations nilfs_btree_ops = {
	.bop_lookup		=	nilfs_btree_lookup,
	.bop_lookup_contig	=	nilfs_btree_lookup_contig,
	.bop_insert		=	nilfs_btree_insert,
	.bop_delete		=	nilfs_btree_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_btree_propagate,

	.bop_lookup_dirty_buffers =	nilfs_btree_lookup_dirty_buffers,

	.bop_assign		=	nilfs_btree_assign,
	.bop_mark		=	nilfs_btree_mark,

	.bop_last_key		=	nilfs_btree_last_key,
	.bop_check_insert	=	NULL,
	.bop_check_delete	=	nilfs_btree_check_delete,
	.bop_gather_data	=	nilfs_btree_gather_data,
};
static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
	.bop_lookup		=	NULL,
	.bop_lookup_contig	=	NULL,
	.bop_insert		=	NULL,
	.bop_delete		=	NULL,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_btree_propagate_gc,

	.bop_lookup_dirty_buffers =	nilfs_btree_lookup_dirty_buffers,

	.bop_assign		=	nilfs_btree_assign_gc,
	.bop_mark		=	NULL,

	.bop_last_key		=	NULL,
	.bop_check_insert	=	NULL,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	NULL,
};
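
/*
 * The GC operation table only carries the callbacks needed while moving
 * blocks during garbage collection (propagate, dirty-buffer lookup and
 * assign); everything that would change the key space is left NULL.
 */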
int nilfs_btree_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_btree_ops;
	bmap->b_nchildren_per_block =
		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
	return 0;
}
void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_btree_ops_gc;
	bmap->b_nchildren_per_block =
		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
}