/*
 * btree.c - NILFS B-tree.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "page.h"
#include "btnode.h"
#include "btree.h"
#include "alloc.h"
/**
 * struct nilfs_btree_path - A path on which B-tree operations are executed
 * @bp_bh: buffer head of node block
 * @bp_sib_bh: buffer head of sibling node block
 * @bp_index: index of child node
 * @bp_oldreq: ptr end request for old ptr
 * @bp_newreq: ptr alloc request for new ptr
 * @bp_op: rebalance operation
 */
struct nilfs_btree_path {
	struct buffer_head *bp_bh;
	struct buffer_head *bp_sib_bh;
	int bp_index;
	union nilfs_bmap_ptr_req bp_oldreq;
	union nilfs_bmap_ptr_req bp_newreq;
	struct nilfs_btnode_chkey_ctxt bp_ctxt;
	void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *,
		      int, __u64 *, __u64 *);
};
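/*
 * A path is allocated as an array of NILFS_BTREE_LEVEL_MAX elements, one
 * per tree level.  Lookup fills in bp_bh and bp_index from the root down,
 * and the insert/delete paths attach a sibling buffer and a rebalance
 * callback (bp_op) to each level that needs to be touched.
 */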
/*
 * B-tree path operations
 */

static struct kmem_cache *nilfs_btree_path_cache;
int __init nilfs_btree_path_cache_init(void)
{
	nilfs_btree_path_cache =
		kmem_cache_create("nilfs2_btree_path_cache",
				  sizeof(struct nilfs_btree_path) *
				  NILFS_BTREE_LEVEL_MAX, 0, 0, NULL);
	return (nilfs_btree_path_cache != NULL) ? 0 : -ENOMEM;
}
void nilfs_btree_path_cache_destroy(void)
{
	kmem_cache_destroy(nilfs_btree_path_cache);
}
static inline struct nilfs_btree_path *
nilfs_btree_alloc_path(const struct nilfs_btree *btree)
{
	return (struct nilfs_btree_path *)
		kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
}
static inline void nilfs_btree_free_path(const struct nilfs_btree *btree,
					 struct nilfs_btree_path *path)
{
	kmem_cache_free(nilfs_btree_path_cache, path);
}
static void nilfs_btree_init_path(const struct nilfs_btree *btree,
				  struct nilfs_btree_path *path)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_DATA;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++) {
		path[level].bp_bh = NULL;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index = 0;
		path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_op = NULL;
	}
}
static void nilfs_btree_clear_path(const struct nilfs_btree *btree,
				   struct nilfs_btree_path *path)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_DATA;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++) {
		if (path[level].bp_bh != NULL) {
			nilfs_bmap_put_block(&btree->bt_bmap,
					     path[level].bp_bh);
			path[level].bp_bh = NULL;
		}
		/* sib_bh is released or deleted by prepare or commit
		 * operations. */
		path[level].bp_sib_bh = NULL;
		path[level].bp_index = 0;
		path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_op = NULL;
	}
}
/*
 * B-tree node operations
 */
static inline int
nilfs_btree_node_get_flags(const struct nilfs_btree *btree,
			   const struct nilfs_btree_node *node)
{
	return node->bn_flags;
}

static inline void
nilfs_btree_node_set_flags(struct nilfs_btree *btree,
			   struct nilfs_btree_node *node,
			   int flags)
{
	node->bn_flags = flags;
}
static inline int nilfs_btree_node_root(const struct nilfs_btree *btree,
					const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_get_flags(btree, node) & NILFS_BTREE_NODE_ROOT;
}
static inline int
nilfs_btree_node_get_level(const struct nilfs_btree *btree,
			   const struct nilfs_btree_node *node)
{
	return node->bn_level;
}

static inline void
nilfs_btree_node_set_level(struct nilfs_btree *btree,
			   struct nilfs_btree_node *node,
			   int level)
{
	node->bn_level = level;
}
static inline int
nilfs_btree_node_get_nchildren(const struct nilfs_btree *btree,
			       const struct nilfs_btree_node *node)
{
	return le16_to_cpu(node->bn_nchildren);
}

static inline void
nilfs_btree_node_set_nchildren(struct nilfs_btree *btree,
			       struct nilfs_btree_node *node,
			       int nchildren)
{
	node->bn_nchildren = cpu_to_le16(nchildren);
}
static inline int
nilfs_btree_node_size(const struct nilfs_btree *btree)
{
	return 1 << btree->bt_bmap.b_inode->i_blkbits;
}

static inline int
nilfs_btree_node_nchildren_min(const struct nilfs_btree *btree,
			       const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_root(btree, node) ?
		NILFS_BTREE_ROOT_NCHILDREN_MIN :
		NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
}

static inline int
nilfs_btree_node_nchildren_max(const struct nilfs_btree *btree,
			       const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_root(btree, node) ?
		NILFS_BTREE_ROOT_NCHILDREN_MAX :
		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
}
static inline __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree *btree,
		       const struct nilfs_btree_node *node)
{
	return (__le64 *)((char *)(node + 1) +
			  (nilfs_btree_node_root(btree, node) ?
			   0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}

static inline __le64 *
nilfs_btree_node_dptrs(const struct nilfs_btree *btree,
		       const struct nilfs_btree_node *node)
{
	return (__le64 *)(nilfs_btree_node_dkeys(btree, node) +
			  nilfs_btree_node_nchildren_max(btree, node));
}
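/*
 * On-disk node layout: the nilfs_btree_node header is followed by an array
 * of 64-bit keys and then an array of 64-bit pointers, both sized for the
 * maximum number of children.  The root node lives inline in the bmap
 * union (b_u.u_data) and omits the extra pad, while non-root nodes occupy
 * whole btnode blocks.
 */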
static inline __u64
nilfs_btree_node_get_key(const struct nilfs_btree *btree,
			 const struct nilfs_btree_node *node, int index)
{
	return nilfs_bmap_dkey_to_key(*(nilfs_btree_node_dkeys(btree, node) +
					index));
}

static inline void
nilfs_btree_node_set_key(struct nilfs_btree *btree,
			 struct nilfs_btree_node *node, int index, __u64 key)
{
	*(nilfs_btree_node_dkeys(btree, node) + index) =
		nilfs_bmap_key_to_dkey(key);
}

static inline __u64
nilfs_btree_node_get_ptr(const struct nilfs_btree *btree,
			 const struct nilfs_btree_node *node,
			 int index)
{
	return nilfs_bmap_dptr_to_ptr(*(nilfs_btree_node_dptrs(btree, node) +
					index));
}

static inline void
nilfs_btree_node_set_ptr(struct nilfs_btree *btree,
			 struct nilfs_btree_node *node,
			 int index,
			 __u64 ptr)
{
	*(nilfs_btree_node_dptrs(btree, node) + index) =
		nilfs_bmap_ptr_to_dptr(ptr);
}
static void nilfs_btree_node_init(struct nilfs_btree *btree,
				  struct nilfs_btree_node *node,
				  int flags, int level, int nchildren,
				  const __u64 *keys, const __u64 *ptrs)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int i;

	nilfs_btree_node_set_flags(btree, node, flags);
	nilfs_btree_node_set_level(btree, node, level);
	nilfs_btree_node_set_nchildren(btree, node, nchildren);

	dkeys = nilfs_btree_node_dkeys(btree, node);
	dptrs = nilfs_btree_node_dptrs(btree, node);
	for (i = 0; i < nchildren; i++) {
		dkeys[i] = nilfs_bmap_key_to_dkey(keys[i]);
		dptrs[i] = nilfs_bmap_ptr_to_dptr(ptrs[i]);
	}
}
/* Assume the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_left(struct nilfs_btree *btree,
				       struct nilfs_btree_node *left,
				       struct nilfs_btree_node *right,
				       int n)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(btree, left);
	ldptrs = nilfs_btree_node_dptrs(btree, left);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);

	rdkeys = nilfs_btree_node_dkeys(btree, right);
	rdptrs = nilfs_btree_node_dptrs(btree, right);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);

	memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
	memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
	memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
	memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));

	lnchildren += n;
	rnchildren -= n;
	nilfs_btree_node_set_nchildren(btree, left, lnchildren);
	nilfs_btree_node_set_nchildren(btree, right, rnchildren);
}
/* Assume that the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_right(struct nilfs_btree *btree,
					struct nilfs_btree_node *left,
					struct nilfs_btree_node *right,
					int n)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(btree, left);
	ldptrs = nilfs_btree_node_dptrs(btree, left);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);

	rdkeys = nilfs_btree_node_dkeys(btree, right);
	rdptrs = nilfs_btree_node_dptrs(btree, right);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);

	memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
	memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
	memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
	memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));

	lnchildren -= n;
	rnchildren += n;
	nilfs_btree_node_set_nchildren(btree, left, lnchildren);
	nilfs_btree_node_set_nchildren(btree, right, rnchildren);
}
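/*
 * nilfs_btree_node_move_left() and nilfs_btree_node_move_right() shift n
 * key/pointer pairs between two adjacent sibling nodes and adjust both
 * child counts; they are the primitives used below by the carry, borrow,
 * concatenate and split operations.
 */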
/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_insert(struct nilfs_btree *btree,
				    struct nilfs_btree_node *node,
				    __u64 key, __u64 ptr, int index)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(btree, node);
	dptrs = nilfs_btree_node_dptrs(btree, node);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	if (index < nchildren) {
		memmove(dkeys + index + 1, dkeys + index,
			(nchildren - index) * sizeof(*dkeys));
		memmove(dptrs + index + 1, dptrs + index,
			(nchildren - index) * sizeof(*dptrs));
	}
	dkeys[index] = nilfs_bmap_key_to_dkey(key);
	dptrs[index] = nilfs_bmap_ptr_to_dptr(ptr);
	nchildren++;
	nilfs_btree_node_set_nchildren(btree, node, nchildren);
}
/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_delete(struct nilfs_btree *btree,
				    struct nilfs_btree_node *node,
				    __u64 *keyp, __u64 *ptrp, int index)
{
	__u64 key;
	__u64 ptr;
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(btree, node);
	dptrs = nilfs_btree_node_dptrs(btree, node);
	key = nilfs_bmap_dkey_to_key(dkeys[index]);
	ptr = nilfs_bmap_dptr_to_ptr(dptrs[index]);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	if (keyp != NULL)
		*keyp = key;
	if (ptrp != NULL)
		*ptrp = ptr;

	if (index < nchildren - 1) {
		memmove(dkeys + index, dkeys + index + 1,
			(nchildren - index - 1) * sizeof(*dkeys));
		memmove(dptrs + index, dptrs + index + 1,
			(nchildren - index - 1) * sizeof(*dptrs));
	}
	nchildren--;
	nilfs_btree_node_set_nchildren(btree, node, nchildren);
}
static int nilfs_btree_node_lookup(const struct nilfs_btree *btree,
				   const struct nilfs_btree_node *node,
				   __u64 key, int *indexp)
{
	__u64 nkey;
	int index, low, high, s;

	/* binary search */
	low = 0;
	high = nilfs_btree_node_get_nchildren(btree, node) - 1;
	index = 0;
	s = 0;
	while (low <= high) {
		index = (low + high) / 2;
		nkey = nilfs_btree_node_get_key(btree, node, index);
		if (nkey == key) {
			s = 0;
			goto out;
		} else if (nkey < key) {
			low = index + 1;
			s = -1;
		} else {
			high = index - 1;
			s = 1;
		}
	}

	/* adjust index */
	if (nilfs_btree_node_get_level(btree, node) >
	    NILFS_BTREE_LEVEL_NODE_MIN) {
		if ((s > 0) && (index > 0))
			index--;
	} else if (s < 0)
		index++;

 out:
	*indexp = index;

	return s == 0;
}
static inline struct nilfs_btree_node *
nilfs_btree_get_root(const struct nilfs_btree *btree)
{
	return (struct nilfs_btree_node *)btree->bt_bmap.b_u.u_data;
}

static inline struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree *btree,
			     const struct nilfs_btree_path *path,
			     int level)
{
	return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}

static inline struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree *btree,
			 const struct nilfs_btree_path *path,
			 int level)
{
	return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}

static inline int nilfs_btree_height(const struct nilfs_btree *btree)
{
	return nilfs_btree_node_get_level(btree, nilfs_btree_get_root(btree))
		+ 1;
}

static inline struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_btree *btree,
		     const struct nilfs_btree_path *path,
		     int level)
{
	return (level == nilfs_btree_height(btree) - 1) ?
		nilfs_btree_get_root(btree) :
		nilfs_btree_get_nonroot_node(btree, path, level);
}
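/*
 * Level numbering: NILFS_BTREE_LEVEL_DATA refers to the data block itself,
 * NILFS_BTREE_LEVEL_NODE_MIN is the lowest b-tree node level, and the root
 * sits at nilfs_btree_height() - 1.  nilfs_btree_get_node() hides the
 * difference between the inline root and on-disk child nodes.
 */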
static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
				 struct nilfs_btree_path *path,
				 __u64 key, __u64 *ptrp, int minlevel)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int level, index, found, ret;

	node = nilfs_btree_get_root(btree);
	level = nilfs_btree_node_get_level(btree, node);
	if ((level < minlevel) ||
	    (nilfs_btree_node_get_nchildren(btree, node) <= 0))
		return -ENOENT;

	found = nilfs_btree_node_lookup(btree, node, key, &index);
	ptr = nilfs_btree_node_get_ptr(btree, node, index);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	for (level--; level >= minlevel; level--) {
		ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
					   &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		BUG_ON(level != nilfs_btree_node_get_level(btree, node));
		if (!found)
			found = nilfs_btree_node_lookup(btree, node, key,
							&index);
		else
			index = 0;
		if (index < nilfs_btree_node_nchildren_max(btree, node))
			ptr = nilfs_btree_node_get_ptr(btree, node, index);
		else {
			WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
			/* insert */
			ptr = NILFS_BMAP_INVALID_PTR;
		}
		path[level].bp_index = index;
	}
	if (!found)
		return -ENOENT;

	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int index, level, ret;

	node = nilfs_btree_get_root(btree);
	index = nilfs_btree_node_get_nchildren(btree, node) - 1;
	if (index < 0)
		return -ENOENT;
	level = nilfs_btree_node_get_level(btree, node);
	ptr = nilfs_btree_node_get_ptr(btree, node, index);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	for (level--; level > 0; level--) {
		ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
					   &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		BUG_ON(level != nilfs_btree_node_get_level(btree, node));
		index = nilfs_btree_node_get_nchildren(btree, node) - 1;
		ptr = nilfs_btree_node_get_ptr(btree, node, index);
		path[level].bp_index = index;
	}

	if (keyp != NULL)
		*keyp = nilfs_btree_node_get_key(btree, node, index);
	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
static int nilfs_btree_lookup(const struct nilfs_bmap *bmap,
			      __u64 key, int level, __u64 *ptrp)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	__u64 ptr;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);

	if (ptrp != NULL)
		*ptrp = ptr;

	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);

	return ret;
}
static void nilfs_btree_promote_key(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 key)
{
	if (level < nilfs_btree_height(btree) - 1) {
		do {
			lock_buffer(path[level].bp_bh);
			nilfs_btree_node_set_key(
				btree,
				nilfs_btree_get_nonroot_node(
					btree, path, level),
				path[level].bp_index, key);
			if (!buffer_dirty(path[level].bp_bh))
				nilfs_btnode_mark_dirty(path[level].bp_bh);
			unlock_buffer(path[level].bp_bh);
		} while ((path[level].bp_index == 0) &&
			 (++level < nilfs_btree_height(btree) - 1));
	}

	/* root */
	if (level == nilfs_btree_height(btree) - 1) {
		nilfs_btree_node_set_key(btree,
					 nilfs_btree_get_root(btree),
					 path[level].bp_index, key);
	}
}
static void nilfs_btree_do_insert(struct nilfs_btree *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;

	if (level < nilfs_btree_height(btree) - 1) {
		lock_buffer(path[level].bp_bh);
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
					path[level].bp_index);
		if (!buffer_dirty(path[level].bp_bh))
			nilfs_btnode_mark_dirty(path[level].bp_bh);
		unlock_buffer(path[level].bp_bh);

		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
						nilfs_btree_node_get_key(
							btree, node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
					path[level].bp_index);
	}
}
static void nilfs_btree_carry_left(struct nilfs_btree *btree,
				   struct nilfs_btree_path *path,
				   int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, move;

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	left = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);
	move = 0;

	n = (nchildren + lnchildren + 1) / 2 - lnchildren;
	if (n > path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_left(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, node, 0));

	if (move) {
		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index += lnchildren;
		path[level + 1].bp_index--;
	} else {
		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= n;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
static void nilfs_btree_carry_right(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, move;

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	right = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);
	move = 0;

	n = (nchildren + rnchildren + 1) / 2 - rnchildren;
	if (n > nchildren - path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, right, 0));
	path[level + 1].bp_index--;

	if (move) {
		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -=
			nilfs_btree_node_get_nchildren(btree, node);
		path[level + 1].bp_index++;
	} else {
		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
static void nilfs_btree_split(struct nilfs_btree *btree,
			      struct nilfs_btree_path *path,
			      int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	__u64 newkey;
	__u64 newptr;
	int nchildren, n, move;

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	right = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	move = 0;

	n = (nchildren + 1) / 2;
	if (n > nchildren - path[level].bp_index) {
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	newkey = nilfs_btree_node_get_key(btree, right, 0);
	newptr = path[level].bp_newreq.bpr_ptr;

	if (move) {
		path[level].bp_index -=
			nilfs_btree_node_get_nchildren(btree, node);
		nilfs_btree_node_insert(btree, right, *keyp, *ptrp,
					path[level].bp_index);

		*keyp = nilfs_btree_node_get_key(btree, right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
	} else {
		nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

		*keyp = nilfs_btree_node_get_key(btree, right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	path[level + 1].bp_index++;
}
static void nilfs_btree_grow(struct nilfs_btree *btree,
			     struct nilfs_btree_path *path,
			     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n;

	lock_buffer(path[level].bp_sib_bh);

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_sib_node(btree, path, level);

	n = nilfs_btree_node_get_nchildren(btree, root);

	nilfs_btree_node_move_right(btree, root, child, n);
	nilfs_btree_node_set_level(btree, root, level + 1);

	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_sib_bh);

	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

	*keyp = nilfs_btree_node_get_key(btree, child, 0);
	*ptrp = path[level].bp_newreq.bpr_ptr;
}
static __u64 nilfs_btree_find_near(const struct nilfs_btree *btree,
				   const struct nilfs_btree_path *path)
{
	struct nilfs_btree_node *node;
	int level;

	if (path == NULL)
		return NILFS_BMAP_INVALID_PTR;

	/* left sibling */
	level = NILFS_BTREE_LEVEL_NODE_MIN;
	if (path[level].bp_index > 0) {
		node = nilfs_btree_get_node(btree, path, level);
		return nilfs_btree_node_get_ptr(btree, node,
						path[level].bp_index - 1);
	}

	/* parent */
	level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
	if (level <= nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_node(btree, path, level);
		return nilfs_btree_node_get_ptr(btree, node,
						path[level].bp_index);
	}

	return NILFS_BMAP_INVALID_PTR;
}
static __u64 nilfs_btree_find_target_v(const struct nilfs_btree *btree,
				       const struct nilfs_btree_path *path,
				       __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(&btree->bt_bmap, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;
	else {
		ptr = nilfs_btree_find_near(btree, path);
		if (ptr != NILFS_BMAP_INVALID_PTR)
			/* near */
			return ptr;
	}
	/* block group */
	return nilfs_bmap_find_target_in_group(&btree->bt_bmap);
}
static void nilfs_btree_set_target_v(struct nilfs_btree *btree, __u64 key,
				     __u64 ptr)
{
	btree->bt_bmap.b_last_allocated_key = key;
	btree->bt_bmap.b_last_allocated_ptr = ptr;
}
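/*
 * Insertion is split into prepare/commit phases.
 * nilfs_btree_prepare_insert() walks up from the leaf level and records,
 * per level, which rebalance operation will make room: a plain insert,
 * carrying entries to a left or right sibling, splitting the node, or
 * growing the tree by one level.  nilfs_btree_commit_insert() then applies
 * the recorded operations bottom-up.
 */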
static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int *levelp, __u64 key, __u64 ptr,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ret;

	stats->bs_nblocks = 0;
	level = NILFS_BTREE_LEVEL_DATA;

	/* allocate a new ptr for data block */
	if (btree->bt_ops->btop_find_target != NULL)
		path[level].bp_newreq.bpr_ptr =
			btree->bt_ops->btop_find_target(btree, path, key);

	ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
		&btree->bt_bmap, &path[level].bp_newreq);
	if (ret < 0)
		goto err_out_data;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		if (nilfs_btree_node_get_nchildren(btree, node) <
		    nilfs_btree_node_nchildren_max(btree, node)) {
			path[level].bp_op = nilfs_btree_do_insert;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1);
		pindex = path[level + 1].bp_index;

		/* left sibling */
		if (pindex > 0) {
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex - 1);
			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
						   &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) <
			    nilfs_btree_node_nchildren_max(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_left;
				stats->bs_nblocks++;
				goto out;
			} else
				nilfs_bmap_put_block(&btree->bt_bmap, bh);
		}

		/* right sibling */
		if (pindex <
		    nilfs_btree_node_get_nchildren(btree, parent) - 1) {
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex + 1);
			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
						   &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) <
			    nilfs_btree_node_nchildren_max(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_right;
				stats->bs_nblocks++;
				goto out;
			} else
				nilfs_bmap_put_block(&btree->bt_bmap, bh);
		}

		/* split */
		path[level].bp_newreq.bpr_ptr =
			path[level - 1].bp_newreq.bpr_ptr + 1;
		ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
			&btree->bt_bmap, &path[level].bp_newreq);
		if (ret < 0)
			goto err_out_child_node;
		ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
					       path[level].bp_newreq.bpr_ptr,
					       &bh);
		if (ret < 0)
			goto err_out_curr_node;

		stats->bs_nblocks++;

		lock_buffer(bh);
		nilfs_btree_node_init(btree,
				      (struct nilfs_btree_node *)bh->b_data,
				      0, level, 0, NULL, NULL);
		unlock_buffer(bh);
		path[level].bp_sib_bh = bh;
		path[level].bp_op = nilfs_btree_split;
	}

	/* root */
	node = nilfs_btree_get_root(btree);
	if (nilfs_btree_node_get_nchildren(btree, node) <
	    nilfs_btree_node_nchildren_max(btree, node)) {
		path[level].bp_op = nilfs_btree_do_insert;
		stats->bs_nblocks++;
		goto out;
	}

	/* grow */
	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
	ret = btree->bt_bmap.b_pops->bpop_prepare_alloc_ptr(
		&btree->bt_bmap, &path[level].bp_newreq);
	if (ret < 0)
		goto err_out_child_node;
	ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
				       path[level].bp_newreq.bpr_ptr, &bh);
	if (ret < 0)
		goto err_out_curr_node;

	lock_buffer(bh);
	nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
			      0, level, 0, NULL, NULL);
	unlock_buffer(bh);
	path[level].bp_sib_bh = bh;
	path[level].bp_op = nilfs_btree_grow;

	level++;
	path[level].bp_op = nilfs_btree_do_insert;

	/* a newly-created node block and a data block are added */
	stats->bs_nblocks += 2;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
						    &path[level].bp_newreq);
 err_out_child_node:
	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
		nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_sib_bh);
		btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(
			&btree->bt_bmap, &path[level].bp_newreq);
	}

	btree->bt_bmap.b_pops->bpop_abort_alloc_ptr(&btree->bt_bmap,
						    &path[level].bp_newreq);
 err_out_data:
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, __u64 key, __u64 ptr)
{
	int level;

	/* ptr must be a pointer to a buffer head. */
	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
	if (btree->bt_ops->btop_set_target != NULL)
		btree->bt_ops->btop_set_target(btree, key, ptr);

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		if (btree->bt_bmap.b_pops->bpop_commit_alloc_ptr != NULL) {
			btree->bt_bmap.b_pops->bpop_commit_alloc_ptr(
				&btree->bt_bmap, &path[level - 1].bp_newreq);
		}
		path[level].bp_op(btree, path, level, &key, &ptr);
	}

	if (!nilfs_bmap_dirty(&btree->bt_bmap))
		nilfs_bmap_set_dirty(&btree->bt_bmap);
}
static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	int level, ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN);
	if (ret != -ENOENT) {
		if (ret == 0)
			ret = -EEXIST;
		goto out;
	}

	ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_insert(btree, path, level, key, ptr);
	nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);

 out:
	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);
	return ret;
}
static void nilfs_btree_do_delete(struct nilfs_btree *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;

	if (level < nilfs_btree_height(btree) - 1) {
		lock_buffer(path[level].bp_bh);
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		nilfs_btree_node_delete(btree, node, keyp, ptrp,
					path[level].bp_index);
		if (!buffer_dirty(path[level].bp_bh))
			nilfs_btnode_mark_dirty(path[level].bp_bh);
		unlock_buffer(path[level].bp_bh);
		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_delete(btree, node, keyp, ptrp,
					path[level].bp_index);
	}
}
static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	left = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);

	n = (nchildren + lnchildren) / 2 - nchildren;

	nilfs_btree_node_move_right(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, node, 0));

	nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += n;
}
static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	right = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);

	n = (nchildren + rnchildren) / 2 - nchildren;

	nilfs_btree_node_move_left(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, right, 0));
	path[level + 1].bp_index--;

	nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
}
static void nilfs_btree_concat_left(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	left = nilfs_btree_get_sib_node(btree, path, level);

	n = nilfs_btree_node_get_nchildren(btree, node);

	nilfs_btree_node_move_left(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_bh);
	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += nilfs_btree_node_get_nchildren(btree, left);
}
static void nilfs_btree_concat_right(struct nilfs_btree *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	right = nilfs_btree_get_sib_node(btree, path, level);

	n = nilfs_btree_node_get_nchildren(btree, right);

	nilfs_btree_node_move_left(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level + 1].bp_index++;
}
static void nilfs_btree_shrink(struct nilfs_btree *btree,
			       struct nilfs_btree_path *path,
			       int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	lock_buffer(path[level].bp_bh);
	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_nonroot_node(btree, path, level);

	nilfs_btree_node_delete(btree, root, NULL, NULL, 0);
	nilfs_btree_node_set_level(btree, root, level);
	n = nilfs_btree_node_get_nchildren(btree, child);
	nilfs_btree_node_move_left(btree, root, child, n);
	unlock_buffer(path[level].bp_bh);

	nilfs_bmap_delete_block(&btree->bt_bmap, path[level].bp_bh);
	path[level].bp_bh = NULL;
}
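/*
 * Deletion mirrors insertion: nilfs_btree_prepare_delete() chooses, per
 * level, between a plain delete, borrowing entries from a sibling,
 * concatenating with a sibling, or shrinking the tree height when the
 * root is left with a single child; nilfs_btree_commit_delete() applies
 * the chosen operations bottom-up.
 */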
static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int *levelp,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ret;

	ret = 0;
	stats->bs_nblocks = 0;
	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		path[level].bp_oldreq.bpr_ptr =
			nilfs_btree_node_get_ptr(btree, node,
						 path[level].bp_index);
		if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
			ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
				&btree->bt_bmap, &path[level].bp_oldreq);
			if (ret < 0)
				goto err_out_child_node;
		}

		if (nilfs_btree_node_get_nchildren(btree, node) >
		    nilfs_btree_node_nchildren_min(btree, node)) {
			path[level].bp_op = nilfs_btree_do_delete;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1);
		pindex = path[level + 1].bp_index;

		if (pindex > 0) {
			/* left sibling */
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex - 1);
			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
						   &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) >
			    nilfs_btree_node_nchildren_min(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_left;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else if (pindex <
			   nilfs_btree_node_get_nchildren(btree, parent) - 1) {
			/* right sibling */
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex + 1);
			ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
						   &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) >
			    nilfs_btree_node_nchildren_min(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_right;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else {
			/* no siblings */
			/* the only child of the root node */
			WARN_ON(level != nilfs_btree_height(btree) - 2);
			if (nilfs_btree_node_get_nchildren(btree, node) - 1 <=
			    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
				path[level].bp_op = nilfs_btree_shrink;
				stats->bs_nblocks += 2;
			} else {
				path[level].bp_op = nilfs_btree_do_delete;
				stats->bs_nblocks++;
			}

			goto out;
		}
	}

	node = nilfs_btree_get_root(btree);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
	if (btree->bt_bmap.b_pops->bpop_prepare_end_ptr != NULL) {
		ret = btree->bt_bmap.b_pops->bpop_prepare_end_ptr(
			&btree->bt_bmap, &path[level].bp_oldreq);
		if (ret < 0)
			goto err_out_child_node;
	}
	/* child of the root node is deleted */
	path[level].bp_op = nilfs_btree_do_delete;
	stats->bs_nblocks++;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
		btree->bt_bmap.b_pops->bpop_abort_end_ptr(
			&btree->bt_bmap, &path[level].bp_oldreq);
 err_out_child_node:
	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
		nilfs_bmap_put_block(&btree->bt_bmap, path[level].bp_sib_bh);
		if (btree->bt_bmap.b_pops->bpop_abort_end_ptr != NULL)
			btree->bt_bmap.b_pops->bpop_abort_end_ptr(
				&btree->bt_bmap, &path[level].bp_oldreq);
	}
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		if (btree->bt_bmap.b_pops->bpop_commit_end_ptr != NULL)
			btree->bt_bmap.b_pops->bpop_commit_end_ptr(
				&btree->bt_bmap, &path[level].bp_oldreq);
		path[level].bp_op(btree, path, level, NULL, NULL);
	}

	if (!nilfs_bmap_dirty(&btree->bt_bmap))
		nilfs_bmap_set_dirty(&btree->bt_bmap);
}
static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	int level, ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);
	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN);
	if (ret < 0)
		goto out;

	ret = nilfs_btree_prepare_delete(btree, path, &level, &stats);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_delete(btree, path, level);
	nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);

 out:
	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);
	return ret;
}
static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);

	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);

	return ret;
}
static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	struct nilfs_btree_node *root, *node;
	__u64 maxkey, nextmaxkey;
	__u64 ptr;
	int nchildren, ret;

	btree = (struct nilfs_btree *)bmap;
	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(btree, root);
		if (nchildren > 1)
			return 0;
		ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
		ret = nilfs_bmap_get_block(bmap, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		return 0;
	}

	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	maxkey = nilfs_btree_node_get_key(btree, node, nchildren - 1);
	nextmaxkey = (nchildren > 1) ?
		nilfs_btree_node_get_key(btree, node, nchildren - 2) : 0;
	if (bh != NULL)
		nilfs_bmap_put_block(bmap, bh);

	return (maxkey == key) && (nextmaxkey < bmap->b_low);
}
static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
				   __u64 *keys, __u64 *ptrs, int nitems)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	struct nilfs_btree_node *node, *root;
	__le64 *dkeys;
	__le64 *dptrs;
	__u64 ptr;
	int nchildren, i, ret;

	btree = (struct nilfs_btree *)bmap;
	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(btree, root);
		WARN_ON(nchildren > 1);
		ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
		ret = nilfs_bmap_get_block(bmap, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		node = NULL;
		return -EINVAL;
	}

	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	if (nchildren < nitems)
		nitems = nchildren;
	dkeys = nilfs_btree_node_dkeys(btree, node);
	dptrs = nilfs_btree_node_dptrs(btree, node);
	for (i = 0; i < nitems; i++) {
		keys[i] = nilfs_bmap_dkey_to_key(dkeys[i]);
		ptrs[i] = nilfs_bmap_dptr_to_ptr(dptrs[i]);
	}

	if (bh != NULL)
		nilfs_bmap_put_block(bmap, bh);

	return nitems;
}
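/*
 * The conversion helpers below turn a direct-mapped bmap into a b-tree:
 * the existing key/pointer pairs are re-inserted either directly into the
 * new root (when they fit in NILFS_BTREE_ROOT_NCHILDREN_MAX) or into a
 * freshly allocated level-1 child node referenced by a two-level root.
 */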
static int
nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
				       union nilfs_bmap_ptr_req *dreq,
				       union nilfs_bmap_ptr_req *nreq,
				       struct buffer_head **bhp,
				       struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	stats->bs_nblocks = 0;

	/* for data */
	/* cannot find near ptr */
	if (btree->bt_ops->btop_find_target != NULL)
		dreq->bpr_ptr
			= btree->bt_ops->btop_find_target(btree, NULL, key);
	ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, dreq);
	if (ret < 0)
		return ret;

	*bhp = NULL;
	stats->bs_nblocks++;
	if (nreq != NULL) {
		nreq->bpr_ptr = dreq->bpr_ptr + 1;
		ret = bmap->b_pops->bpop_prepare_alloc_ptr(bmap, nreq);
		if (ret < 0)
			goto err_out_dreq;

		ret = nilfs_bmap_get_new_block(bmap, nreq->bpr_ptr, &bh);
		if (ret < 0)
			goto err_out_nreq;

		*bhp = bh;
		stats->bs_nblocks++;
	}

	/* success */
	return 0;

	/* error */
 err_out_nreq:
	bmap->b_pops->bpop_abort_alloc_ptr(bmap, nreq);
 err_out_dreq:
	bmap->b_pops->bpop_abort_alloc_ptr(bmap, dreq);
	stats->bs_nblocks = 0;
	return ret;
}
static void
nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
				      __u64 key, __u64 ptr,
				      const __u64 *keys, const __u64 *ptrs,
				      int n, __u64 low, __u64 high,
				      union nilfs_bmap_ptr_req *dreq,
				      union nilfs_bmap_ptr_req *nreq,
				      struct buffer_head *bh)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_node *node;
	__u64 tmpptr;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* ptr must be a pointer to a buffer head. */
	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));

	/* convert and insert */
	btree = (struct nilfs_btree *)bmap;
	nilfs_btree_init(bmap, low, high);
	if (nreq != NULL) {
		if (bmap->b_pops->bpop_commit_alloc_ptr != NULL) {
			bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);
			bmap->b_pops->bpop_commit_alloc_ptr(bmap, nreq);
		}

		/* create child node at level 1 */
		lock_buffer(bh);
		node = (struct nilfs_btree_node *)bh->b_data;
		nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
		nilfs_btree_node_insert(btree, node,
					key, dreq->bpr_ptr, n);
		if (!buffer_dirty(bh))
			nilfs_btnode_mark_dirty(bh);
		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		unlock_buffer(bh);
		nilfs_bmap_put_block(bmap, bh);

		/* create root node at level 2 */
		node = nilfs_btree_get_root(btree);
		tmpptr = nreq->bpr_ptr;
		nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
				      2, 1, &keys[0], &tmpptr);
	} else {
		if (bmap->b_pops->bpop_commit_alloc_ptr != NULL)
			bmap->b_pops->bpop_commit_alloc_ptr(bmap, dreq);

		/* create root node at level 1 */
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
				      1, n, keys, ptrs);
		nilfs_btree_node_insert(btree, node,
					key, dreq->bpr_ptr, n);
		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);
	}

	if (btree->bt_ops->btop_set_target != NULL)
		btree->bt_ops->btop_set_target(btree, key, dreq->bpr_ptr);
}
/**
 * nilfs_btree_convert_and_insert -
 */
int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap,
				   __u64 key, __u64 ptr,
				   const __u64 *keys, const __u64 *ptrs,
				   int n, __u64 low, __u64 high)
{
	struct buffer_head *bh;
	union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
	struct nilfs_bmap_stats stats;
	int ret;

	if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) {
		di = &dreq;
		ni = NULL;
	} else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
			   1 << bmap->b_inode->i_blkbits)) {
		di = &dreq;
		ni = &nreq;
	} else {
		di = NULL;
		ni = NULL;
		BUG();
	}

	ret = nilfs_btree_prepare_convert_and_insert(bmap, key, di, ni, &bh,
						     &stats);
	if (ret < 0)
		return ret;
	nilfs_btree_commit_convert_and_insert(bmap, key, ptr, keys, ptrs, n,
					      low, high, di, ni, bh);
	nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
	return 0;
}
static int nilfs_btree_propagate_p(struct nilfs_btree *btree,
				   struct nilfs_btree_path *path,
				   int level,
				   struct buffer_head *bh)
{
	while ((++level < nilfs_btree_height(btree) - 1) &&
	       !buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);

	return 0;
}
static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
					struct nilfs_btree_path *path,
					int level)
{
	struct nilfs_btree_node *parent;
	int ret;

	parent = nilfs_btree_get_node(btree, path, level + 1);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(btree, parent,
					 path[level + 1].bp_index);
	path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_update(&btree->bt_bmap,
					&path[level].bp_oldreq,
					&path[level].bp_newreq);
	if (ret < 0)
		return ret;

	if (buffer_nilfs_node(path[level].bp_bh)) {
		path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr;
		path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
		path[level].bp_ctxt.bh = path[level].bp_bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0) {
			nilfs_bmap_abort_update(&btree->bt_bmap,
						&path[level].bp_oldreq,
						&path[level].bp_newreq);
			return ret;
		}
	}

	return 0;
}
static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
					struct nilfs_btree_path *path,
					int level)
{
	struct nilfs_btree_node *parent;

	nilfs_bmap_commit_update(&btree->bt_bmap,
				 &path[level].bp_oldreq,
				 &path[level].bp_newreq);

	if (buffer_nilfs_node(path[level].bp_bh)) {
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		path[level].bp_bh = path[level].bp_ctxt.bh;
	}
	set_buffer_nilfs_volatile(path[level].bp_bh);

	parent = nilfs_btree_get_node(btree, path, level + 1);
	nilfs_btree_node_set_ptr(btree, parent, path[level + 1].bp_index,
				 path[level].bp_newreq.bpr_ptr);
}
static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
				       struct nilfs_btree_path *path,
				       int level)
{
	nilfs_bmap_abort_update(&btree->bt_bmap,
				&path[level].bp_oldreq,
				&path[level].bp_newreq);
	if (buffer_nilfs_node(path[level].bp_bh))
		nilfs_btnode_abort_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
}
static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
					   struct nilfs_btree_path *path,
					   int minlevel,
					   int *maxlevelp)
{
	int level, ret;

	level = minlevel;
	if (!buffer_nilfs_volatile(path[level].bp_bh)) {
		ret = nilfs_btree_prepare_update_v(btree, path, level);
		if (ret < 0)
			return ret;
	}
	while ((++level < nilfs_btree_height(btree) - 1) &&
	       !buffer_dirty(path[level].bp_bh)) {

		WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
		ret = nilfs_btree_prepare_update_v(btree, path, level);
		if (ret < 0)
			goto out;
	}

	/* success */
	*maxlevelp = level - 1;
	return 0;

	/* error */
 out:
	while (--level > minlevel)
		nilfs_btree_abort_update_v(btree, path, level);
	if (!buffer_nilfs_volatile(path[level].bp_bh))
		nilfs_btree_abort_update_v(btree, path, level);
	return ret;
}
static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
					   struct nilfs_btree_path *path,
					   int minlevel,
					   int maxlevel,
					   struct buffer_head *bh)
{
	int level;

	if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
		nilfs_btree_commit_update_v(btree, path, minlevel);

	for (level = minlevel + 1; level <= maxlevel; level++)
		nilfs_btree_commit_update_v(btree, path, level);
}
static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
				   struct nilfs_btree_path *path,
				   int level,
				   struct buffer_head *bh)
{
	int maxlevel, ret;
	struct nilfs_btree_node *parent;
	__u64 ptr;

	get_bh(bh);
	path[level].bp_bh = bh;
	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel);
	if (ret < 0)
		goto out;

	if (buffer_nilfs_volatile(path[level].bp_bh)) {
		parent = nilfs_btree_get_node(btree, path, level + 1);
		ptr = nilfs_btree_node_get_ptr(btree, parent,
					       path[level + 1].bp_index);
		ret = nilfs_bmap_mark_dirty(&btree->bt_bmap, ptr);
		if (ret < 0)
			goto out;
	}

	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh);

 out:
	brelse(path[level].bp_bh);
	path[level].bp_bh = NULL;
	return ret;
}
static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
				 struct buffer_head *bh)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	__u64 key;
	int level, ret;

	WARN_ON(!buffer_dirty(bh));

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	if (buffer_nilfs_node(bh)) {
		node = (struct nilfs_btree_node *)bh->b_data;
		key = nilfs_btree_node_get_key(btree, node, 0);
		level = nilfs_btree_node_get_level(btree, node);
	} else {
		key = nilfs_bmap_data_get_key(bmap, bh);
		level = NILFS_BTREE_LEVEL_DATA;
	}

	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
	if (ret < 0) {
		if (unlikely(ret == -ENOENT))
			printk(KERN_CRIT "%s: key = %llu, level == %d\n",
			       __func__, (unsigned long long)key, level);
		goto out;
	}

	ret = btree->bt_ops->btop_propagate(btree, path, level, bh);

 out:
	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);

	return ret;
}
static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap,
				    struct buffer_head *bh)
{
	return nilfs_bmap_mark_dirty(bmap, bh->b_blocknr);
}
static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
					 struct list_head *lists,
					 struct buffer_head *bh)
{
	struct list_head *head;
	struct buffer_head *cbh;
	struct nilfs_btree_node *node, *cnode;
	__u64 key, ckey;
	int level;

	get_bh(bh);
	node = (struct nilfs_btree_node *)bh->b_data;
	key = nilfs_btree_node_get_key(btree, node, 0);
	level = nilfs_btree_node_get_level(btree, node);
	list_for_each(head, &lists[level]) {
		cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
		cnode = (struct nilfs_btree_node *)cbh->b_data;
		ckey = nilfs_btree_node_get_key(btree, cnode, 0);
		if (key < ckey)
			break;
	}
	list_add_tail(&bh->b_assoc_buffers, head);
}
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap,
					     struct list_head *listp)
{
	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
	struct address_space *btcache = &NILFS_BMAP_I(bmap)->i_btnode_cache;
	struct list_head lists[NILFS_BTREE_LEVEL_MAX];
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	pgoff_t index = 0;
	int level, i;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		INIT_LIST_HEAD(&lists[level]);

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh))
					nilfs_btree_add_dirty_buffer(btree,
								     lists, bh);
			} while ((bh = bh->b_this_page) != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		list_splice(&lists[level], listp->prev);
}
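/*
 * nilfs_btree_lookup_dirty_buffers() scans the btnode cache for dirty node
 * buffers and splices them onto the caller's list ordered by level (lowest
 * first) and, within a level, by first key, so that callers can process
 * child nodes before their parents.
 */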
static int nilfs_btree_assign_p(struct nilfs_btree *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	__u64 key;
	__u64 ptr;
	int ret;

	parent = nilfs_btree_get_node(btree, path, level + 1);
	ptr = nilfs_btree_node_get_ptr(btree, parent,
				       path[level + 1].bp_index);
	if (buffer_nilfs_node(*bh)) {
		path[level].bp_ctxt.oldkey = ptr;
		path[level].bp_ctxt.newkey = blocknr;
		path[level].bp_ctxt.bh = *bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0)
			return ret;
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		*bh = path[level].bp_ctxt.bh;
	}

	nilfs_btree_node_set_ptr(btree, parent,
				 path[level + 1].bp_index, blocknr);

	key = nilfs_btree_node_get_key(btree, parent,
				       path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key);
	binfo->bi_dat.bi_level = level;

	return 0;
}
static int nilfs_btree_assign_v(struct nilfs_btree *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	__u64 key;
	__u64 ptr;
	union nilfs_bmap_ptr_req req;
	int ret;

	parent = nilfs_btree_get_node(btree, path, level + 1);
	ptr = nilfs_btree_node_get_ptr(btree, parent,
				       path[level + 1].bp_index);
	req.bpr_ptr = ptr;
	ret = btree->bt_bmap.b_pops->bpop_prepare_start_ptr(&btree->bt_bmap,
							    &req);
	if (ret < 0)
		return ret;
	btree->bt_bmap.b_pops->bpop_commit_start_ptr(&btree->bt_bmap,
						     &req, blocknr);

	key = nilfs_btree_node_get_key(btree, parent,
				       path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
	binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);

	return 0;
}
static int nilfs_btree_assign(struct nilfs_bmap *bmap,
			      struct buffer_head **bh,
			      sector_t blocknr,
			      union nilfs_binfo *binfo)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	__u64 key;
	int level, ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	if (buffer_nilfs_node(*bh)) {
		node = (struct nilfs_btree_node *)(*bh)->b_data;
		key = nilfs_btree_node_get_key(btree, node, 0);
		level = nilfs_btree_node_get_level(btree, node);
	} else {
		key = nilfs_bmap_data_get_key(bmap, *bh);
		level = NILFS_BTREE_LEVEL_DATA;
	}

	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		goto out;
	}

	ret = btree->bt_ops->btop_assign(btree, path, level, bh,
					 blocknr, binfo);

 out:
	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);

	return ret;
}
static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_node *node;
	__u64 key;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	ret = nilfs_bmap_move_v(bmap, (*bh)->b_blocknr, blocknr);
	if (ret < 0)
		return ret;

	if (buffer_nilfs_node(*bh)) {
		node = (struct nilfs_btree_node *)(*bh)->b_data;
		key = nilfs_btree_node_get_key(btree, node, 0);
	} else
		key = nilfs_bmap_data_get_key(bmap, *bh);

	/* on-disk format */
	binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
	binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);

	return 0;
}
*bmap
, __u64 key
, int level
)
2157 struct buffer_head
*bh
;
2158 struct nilfs_btree
*btree
;
2159 struct nilfs_btree_path
*path
;
2163 btree
= (struct nilfs_btree
*)bmap
;
2164 path
= nilfs_btree_alloc_path(btree
);
2167 nilfs_btree_init_path(btree
, path
);
2169 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
+ 1);
2171 WARN_ON(ret
== -ENOENT
);
2174 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, ptr
, &bh
);
2176 WARN_ON(ret
== -ENOENT
);
2180 if (!buffer_dirty(bh
))
2181 nilfs_btnode_mark_dirty(bh
);
2182 nilfs_bmap_put_block(&btree
->bt_bmap
, bh
);
2183 if (!nilfs_bmap_dirty(&btree
->bt_bmap
))
2184 nilfs_bmap_set_dirty(&btree
->bt_bmap
);
2187 nilfs_btree_clear_path(btree
, path
);
2188 nilfs_btree_free_path(btree
, path
);
static const struct nilfs_bmap_operations nilfs_btree_ops = {
	.bop_lookup		=	nilfs_btree_lookup,
	.bop_insert		=	nilfs_btree_insert,
	.bop_delete		=	nilfs_btree_delete,

	.bop_propagate		=	nilfs_btree_propagate,

	.bop_lookup_dirty_buffers =	nilfs_btree_lookup_dirty_buffers,

	.bop_assign		=	nilfs_btree_assign,
	.bop_mark		=	nilfs_btree_mark,

	.bop_last_key		=	nilfs_btree_last_key,
	.bop_check_insert	=	NULL,
	.bop_check_delete	=	nilfs_btree_check_delete,
	.bop_gather_data	=	nilfs_btree_gather_data,
};
static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
	.bop_propagate		=	nilfs_btree_propagate_gc,

	.bop_lookup_dirty_buffers =	nilfs_btree_lookup_dirty_buffers,

	.bop_assign		=	nilfs_btree_assign_gc,

	.bop_last_key		=	NULL,
	.bop_check_insert	=	NULL,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	NULL,
};
static const struct nilfs_btree_operations nilfs_btree_ops_v = {
	.btop_find_target	=	nilfs_btree_find_target_v,
	.btop_set_target	=	nilfs_btree_set_target_v,
	.btop_propagate		=	nilfs_btree_propagate_v,
	.btop_assign		=	nilfs_btree_assign_v,
};
static const struct nilfs_btree_operations nilfs_btree_ops_p = {
	.btop_find_target	=	NULL,
	.btop_set_target	=	NULL,
	.btop_propagate		=	nilfs_btree_propagate_p,
	.btop_assign		=	nilfs_btree_assign_p,
};
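/*
 * Two flavours of per-btree operations: the _v ops translate block
 * addresses through virtual block numbers (the DAT), while the _p ops
 * work on physical block numbers and are used for the DAT file itself,
 * whose b-tree cannot be virtually mapped.  nilfs_btree_init() selects
 * between them based on the owning inode.
 */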
int nilfs_btree_init(struct nilfs_bmap *bmap, __u64 low, __u64 high)
{
	struct nilfs_btree *btree;

	btree = (struct nilfs_btree *)bmap;
	bmap->b_ops = &nilfs_btree_ops;
	bmap->b_low = low;
	bmap->b_high = high;
	switch (bmap->b_inode->i_ino) {
	case NILFS_DAT_INO:
		btree->bt_ops = &nilfs_btree_ops_p;
		break;
	default:
		btree->bt_ops = &nilfs_btree_ops_v;
		break;
	}

	return 0;
}
void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
{
	bmap->b_low = NILFS_BMAP_LARGE_LOW;
	bmap->b_high = NILFS_BMAP_LARGE_HIGH;
	bmap->b_ops = &nilfs_btree_ops_gc;
}
;