2 * btree.c - NILFS B-tree.
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Written by Koji Sato <koji@osrg.net>.
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/errno.h>
26 #include <linux/pagevec.h>
34 * struct nilfs_btree_path - A path on which B-tree operations are executed
35 * @bp_bh: buffer head of node block
36 * @bp_sib_bh: buffer head of sibling node block
37 * @bp_index: index of child node
38 * @bp_oldreq: ptr end request for old ptr
39 * @bp_newreq: ptr alloc request for new ptr
40 * @bp_op: rebalance operation
42 struct nilfs_btree_path
{
43 struct buffer_head
*bp_bh
;
44 struct buffer_head
*bp_sib_bh
;
46 union nilfs_bmap_ptr_req bp_oldreq
;
47 union nilfs_bmap_ptr_req bp_newreq
;
48 struct nilfs_btnode_chkey_ctxt bp_ctxt
;
49 void (*bp_op
)(struct nilfs_btree
*, struct nilfs_btree_path
*,
50 int, __u64
*, __u64
*);
54 * B-tree path operations
57 static struct kmem_cache
*nilfs_btree_path_cache
;
59 int __init
nilfs_btree_path_cache_init(void)
61 nilfs_btree_path_cache
=
62 kmem_cache_create("nilfs2_btree_path_cache",
63 sizeof(struct nilfs_btree_path
) *
64 NILFS_BTREE_LEVEL_MAX
, 0, 0, NULL
);
65 return (nilfs_btree_path_cache
!= NULL
) ? 0 : -ENOMEM
;
68 void nilfs_btree_path_cache_destroy(void)
70 kmem_cache_destroy(nilfs_btree_path_cache
);
73 static inline struct nilfs_btree_path
*
74 nilfs_btree_alloc_path(const struct nilfs_btree
*btree
)
76 return (struct nilfs_btree_path
*)
77 kmem_cache_alloc(nilfs_btree_path_cache
, GFP_NOFS
);
80 static inline void nilfs_btree_free_path(const struct nilfs_btree
*btree
,
81 struct nilfs_btree_path
*path
)
83 kmem_cache_free(nilfs_btree_path_cache
, path
);
86 static void nilfs_btree_init_path(const struct nilfs_btree
*btree
,
87 struct nilfs_btree_path
*path
)
91 for (level
= NILFS_BTREE_LEVEL_DATA
;
92 level
< NILFS_BTREE_LEVEL_MAX
;
94 path
[level
].bp_bh
= NULL
;
95 path
[level
].bp_sib_bh
= NULL
;
96 path
[level
].bp_index
= 0;
97 path
[level
].bp_oldreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
98 path
[level
].bp_newreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
99 path
[level
].bp_op
= NULL
;
103 static void nilfs_btree_clear_path(const struct nilfs_btree
*btree
,
104 struct nilfs_btree_path
*path
)
108 for (level
= NILFS_BTREE_LEVEL_DATA
;
109 level
< NILFS_BTREE_LEVEL_MAX
;
111 if (path
[level
].bp_bh
!= NULL
) {
112 nilfs_bmap_put_block(&btree
->bt_bmap
,
114 path
[level
].bp_bh
= NULL
;
116 /* sib_bh is released or deleted by prepare or commit
118 path
[level
].bp_sib_bh
= NULL
;
119 path
[level
].bp_index
= 0;
120 path
[level
].bp_oldreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
121 path
[level
].bp_newreq
.bpr_ptr
= NILFS_BMAP_INVALID_PTR
;
122 path
[level
].bp_op
= NULL
;
128 * B-tree node operations
132 nilfs_btree_node_get_flags(const struct nilfs_btree
*btree
,
133 const struct nilfs_btree_node
*node
)
135 return node
->bn_flags
;
139 nilfs_btree_node_set_flags(struct nilfs_btree
*btree
,
140 struct nilfs_btree_node
*node
,
143 node
->bn_flags
= flags
;
146 static inline int nilfs_btree_node_root(const struct nilfs_btree
*btree
,
147 const struct nilfs_btree_node
*node
)
149 return nilfs_btree_node_get_flags(btree
, node
) & NILFS_BTREE_NODE_ROOT
;
153 nilfs_btree_node_get_level(const struct nilfs_btree
*btree
,
154 const struct nilfs_btree_node
*node
)
156 return node
->bn_level
;
160 nilfs_btree_node_set_level(struct nilfs_btree
*btree
,
161 struct nilfs_btree_node
*node
,
164 node
->bn_level
= level
;
168 nilfs_btree_node_get_nchildren(const struct nilfs_btree
*btree
,
169 const struct nilfs_btree_node
*node
)
171 return le16_to_cpu(node
->bn_nchildren
);
175 nilfs_btree_node_set_nchildren(struct nilfs_btree
*btree
,
176 struct nilfs_btree_node
*node
,
179 node
->bn_nchildren
= cpu_to_le16(nchildren
);
183 nilfs_btree_node_size(const struct nilfs_btree
*btree
)
185 return 1 << btree
->bt_bmap
.b_inode
->i_blkbits
;
189 nilfs_btree_node_nchildren_min(const struct nilfs_btree
*btree
,
190 const struct nilfs_btree_node
*node
)
192 return nilfs_btree_node_root(btree
, node
) ?
193 NILFS_BTREE_ROOT_NCHILDREN_MIN
:
194 NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree
));
198 nilfs_btree_node_nchildren_max(const struct nilfs_btree
*btree
,
199 const struct nilfs_btree_node
*node
)
201 return nilfs_btree_node_root(btree
, node
) ?
202 NILFS_BTREE_ROOT_NCHILDREN_MAX
:
203 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree
));
206 static inline __le64
*
207 nilfs_btree_node_dkeys(const struct nilfs_btree
*btree
,
208 const struct nilfs_btree_node
*node
)
210 return (__le64
*)((char *)(node
+ 1) +
211 (nilfs_btree_node_root(btree
, node
) ?
212 0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE
));
215 static inline __le64
*
216 nilfs_btree_node_dptrs(const struct nilfs_btree
*btree
,
217 const struct nilfs_btree_node
*node
)
219 return (__le64
*)(nilfs_btree_node_dkeys(btree
, node
) +
220 nilfs_btree_node_nchildren_max(btree
, node
));
224 nilfs_btree_node_get_key(const struct nilfs_btree
*btree
,
225 const struct nilfs_btree_node
*node
, int index
)
227 return nilfs_bmap_dkey_to_key(*(nilfs_btree_node_dkeys(btree
, node
) +
232 nilfs_btree_node_set_key(struct nilfs_btree
*btree
,
233 struct nilfs_btree_node
*node
, int index
, __u64 key
)
235 *(nilfs_btree_node_dkeys(btree
, node
) + index
) =
236 nilfs_bmap_key_to_dkey(key
);
240 nilfs_btree_node_get_ptr(const struct nilfs_btree
*btree
,
241 const struct nilfs_btree_node
*node
,
244 return nilfs_bmap_dptr_to_ptr(*(nilfs_btree_node_dptrs(btree
, node
) +
249 nilfs_btree_node_set_ptr(struct nilfs_btree
*btree
,
250 struct nilfs_btree_node
*node
,
254 *(nilfs_btree_node_dptrs(btree
, node
) + index
) =
255 nilfs_bmap_ptr_to_dptr(ptr
);
258 static void nilfs_btree_node_init(struct nilfs_btree
*btree
,
259 struct nilfs_btree_node
*node
,
260 int flags
, int level
, int nchildren
,
261 const __u64
*keys
, const __u64
*ptrs
)
267 nilfs_btree_node_set_flags(btree
, node
, flags
);
268 nilfs_btree_node_set_level(btree
, node
, level
);
269 nilfs_btree_node_set_nchildren(btree
, node
, nchildren
);
271 dkeys
= nilfs_btree_node_dkeys(btree
, node
);
272 dptrs
= nilfs_btree_node_dptrs(btree
, node
);
273 for (i
= 0; i
< nchildren
; i
++) {
274 dkeys
[i
] = nilfs_bmap_key_to_dkey(keys
[i
]);
275 dptrs
[i
] = nilfs_bmap_ptr_to_dptr(ptrs
[i
]);
279 /* Assume the buffer heads corresponding to left and right are locked. */
280 static void nilfs_btree_node_move_left(struct nilfs_btree
*btree
,
281 struct nilfs_btree_node
*left
,
282 struct nilfs_btree_node
*right
,
285 __le64
*ldkeys
, *rdkeys
;
286 __le64
*ldptrs
, *rdptrs
;
287 int lnchildren
, rnchildren
;
289 ldkeys
= nilfs_btree_node_dkeys(btree
, left
);
290 ldptrs
= nilfs_btree_node_dptrs(btree
, left
);
291 lnchildren
= nilfs_btree_node_get_nchildren(btree
, left
);
293 rdkeys
= nilfs_btree_node_dkeys(btree
, right
);
294 rdptrs
= nilfs_btree_node_dptrs(btree
, right
);
295 rnchildren
= nilfs_btree_node_get_nchildren(btree
, right
);
297 memcpy(ldkeys
+ lnchildren
, rdkeys
, n
* sizeof(*rdkeys
));
298 memcpy(ldptrs
+ lnchildren
, rdptrs
, n
* sizeof(*rdptrs
));
299 memmove(rdkeys
, rdkeys
+ n
, (rnchildren
- n
) * sizeof(*rdkeys
));
300 memmove(rdptrs
, rdptrs
+ n
, (rnchildren
- n
) * sizeof(*rdptrs
));
304 nilfs_btree_node_set_nchildren(btree
, left
, lnchildren
);
305 nilfs_btree_node_set_nchildren(btree
, right
, rnchildren
);
308 /* Assume that the buffer heads corresponding to left and right are locked. */
309 static void nilfs_btree_node_move_right(struct nilfs_btree
*btree
,
310 struct nilfs_btree_node
*left
,
311 struct nilfs_btree_node
*right
,
314 __le64
*ldkeys
, *rdkeys
;
315 __le64
*ldptrs
, *rdptrs
;
316 int lnchildren
, rnchildren
;
318 ldkeys
= nilfs_btree_node_dkeys(btree
, left
);
319 ldptrs
= nilfs_btree_node_dptrs(btree
, left
);
320 lnchildren
= nilfs_btree_node_get_nchildren(btree
, left
);
322 rdkeys
= nilfs_btree_node_dkeys(btree
, right
);
323 rdptrs
= nilfs_btree_node_dptrs(btree
, right
);
324 rnchildren
= nilfs_btree_node_get_nchildren(btree
, right
);
326 memmove(rdkeys
+ n
, rdkeys
, rnchildren
* sizeof(*rdkeys
));
327 memmove(rdptrs
+ n
, rdptrs
, rnchildren
* sizeof(*rdptrs
));
328 memcpy(rdkeys
, ldkeys
+ lnchildren
- n
, n
* sizeof(*rdkeys
));
329 memcpy(rdptrs
, ldptrs
+ lnchildren
- n
, n
* sizeof(*rdptrs
));
333 nilfs_btree_node_set_nchildren(btree
, left
, lnchildren
);
334 nilfs_btree_node_set_nchildren(btree
, right
, rnchildren
);
337 /* Assume that the buffer head corresponding to node is locked. */
338 static void nilfs_btree_node_insert(struct nilfs_btree
*btree
,
339 struct nilfs_btree_node
*node
,
340 __u64 key
, __u64 ptr
, int index
)
346 dkeys
= nilfs_btree_node_dkeys(btree
, node
);
347 dptrs
= nilfs_btree_node_dptrs(btree
, node
);
348 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
349 if (index
< nchildren
) {
350 memmove(dkeys
+ index
+ 1, dkeys
+ index
,
351 (nchildren
- index
) * sizeof(*dkeys
));
352 memmove(dptrs
+ index
+ 1, dptrs
+ index
,
353 (nchildren
- index
) * sizeof(*dptrs
));
355 dkeys
[index
] = nilfs_bmap_key_to_dkey(key
);
356 dptrs
[index
] = nilfs_bmap_ptr_to_dptr(ptr
);
358 nilfs_btree_node_set_nchildren(btree
, node
, nchildren
);
361 /* Assume that the buffer head corresponding to node is locked. */
362 static void nilfs_btree_node_delete(struct nilfs_btree
*btree
,
363 struct nilfs_btree_node
*node
,
364 __u64
*keyp
, __u64
*ptrp
, int index
)
372 dkeys
= nilfs_btree_node_dkeys(btree
, node
);
373 dptrs
= nilfs_btree_node_dptrs(btree
, node
);
374 key
= nilfs_bmap_dkey_to_key(dkeys
[index
]);
375 ptr
= nilfs_bmap_dptr_to_ptr(dptrs
[index
]);
376 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
382 if (index
< nchildren
- 1) {
383 memmove(dkeys
+ index
, dkeys
+ index
+ 1,
384 (nchildren
- index
- 1) * sizeof(*dkeys
));
385 memmove(dptrs
+ index
, dptrs
+ index
+ 1,
386 (nchildren
- index
- 1) * sizeof(*dptrs
));
389 nilfs_btree_node_set_nchildren(btree
, node
, nchildren
);
392 static int nilfs_btree_node_lookup(const struct nilfs_btree
*btree
,
393 const struct nilfs_btree_node
*node
,
394 __u64 key
, int *indexp
)
397 int index
, low
, high
, s
;
401 high
= nilfs_btree_node_get_nchildren(btree
, node
) - 1;
404 while (low
<= high
) {
405 index
= (low
+ high
) / 2;
406 nkey
= nilfs_btree_node_get_key(btree
, node
, index
);
410 } else if (nkey
< key
) {
420 if (nilfs_btree_node_get_level(btree
, node
) >
421 NILFS_BTREE_LEVEL_NODE_MIN
) {
422 if ((s
> 0) && (index
> 0))
428 BUG_ON(indexp
== NULL
);
434 static inline struct nilfs_btree_node
*
435 nilfs_btree_get_root(const struct nilfs_btree
*btree
)
437 return (struct nilfs_btree_node
*)btree
->bt_bmap
.b_u
.u_data
;
440 static inline struct nilfs_btree_node
*
441 nilfs_btree_get_nonroot_node(const struct nilfs_btree
*btree
,
442 const struct nilfs_btree_path
*path
,
445 return (struct nilfs_btree_node
*)path
[level
].bp_bh
->b_data
;
448 static inline struct nilfs_btree_node
*
449 nilfs_btree_get_sib_node(const struct nilfs_btree
*btree
,
450 const struct nilfs_btree_path
*path
,
453 return (struct nilfs_btree_node
*)path
[level
].bp_sib_bh
->b_data
;
/*
 * Tree height: the root's level plus one.  Callers compare levels
 * against height - 1 to detect the root (see nilfs_btree_get_node).
 */
static inline int nilfs_btree_height(const struct nilfs_btree *btree)
{
	return nilfs_btree_node_get_level(btree, nilfs_btree_get_root(btree))
		+ 1;
}
/* Node at @level on @path; the topmost level maps to the embedded root. */
static inline struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_btree *btree,
		     const struct nilfs_btree_path *path,
		     int level)
{
	return (level == nilfs_btree_height(btree) - 1) ?
		nilfs_btree_get_root(btree) :
		nilfs_btree_get_nonroot_node(btree, path, level);
}
472 static int nilfs_btree_do_lookup(const struct nilfs_btree
*btree
,
473 struct nilfs_btree_path
*path
,
474 __u64 key
, __u64
*ptrp
, int minlevel
)
476 struct nilfs_btree_node
*node
;
478 int level
, index
, found
, ret
;
480 BUG_ON(minlevel
<= NILFS_BTREE_LEVEL_DATA
);
482 node
= nilfs_btree_get_root(btree
);
483 level
= nilfs_btree_node_get_level(btree
, node
);
484 if ((level
< minlevel
) ||
485 (nilfs_btree_node_get_nchildren(btree
, node
) <= 0))
488 found
= nilfs_btree_node_lookup(btree
, node
, key
, &index
);
489 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
490 path
[level
].bp_bh
= NULL
;
491 path
[level
].bp_index
= index
;
493 for (level
--; level
>= minlevel
; level
--) {
494 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, ptr
,
498 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
499 BUG_ON(level
!= nilfs_btree_node_get_level(btree
, node
));
501 found
= nilfs_btree_node_lookup(btree
, node
, key
,
505 if (index
< nilfs_btree_node_nchildren_max(btree
, node
))
506 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
508 BUG_ON(found
|| level
!= NILFS_BTREE_LEVEL_NODE_MIN
);
510 ptr
= NILFS_BMAP_INVALID_PTR
;
512 path
[level
].bp_index
= index
;
523 static int nilfs_btree_do_lookup_last(const struct nilfs_btree
*btree
,
524 struct nilfs_btree_path
*path
,
525 __u64
*keyp
, __u64
*ptrp
)
527 struct nilfs_btree_node
*node
;
529 int index
, level
, ret
;
531 node
= nilfs_btree_get_root(btree
);
532 index
= nilfs_btree_node_get_nchildren(btree
, node
) - 1;
535 level
= nilfs_btree_node_get_level(btree
, node
);
536 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
537 path
[level
].bp_bh
= NULL
;
538 path
[level
].bp_index
= index
;
540 for (level
--; level
> 0; level
--) {
541 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, ptr
,
545 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
546 BUG_ON(level
!= nilfs_btree_node_get_level(btree
, node
));
547 index
= nilfs_btree_node_get_nchildren(btree
, node
) - 1;
548 ptr
= nilfs_btree_node_get_ptr(btree
, node
, index
);
549 path
[level
].bp_index
= index
;
553 *keyp
= nilfs_btree_node_get_key(btree
, node
, index
);
560 static int nilfs_btree_lookup(const struct nilfs_bmap
*bmap
,
561 __u64 key
, int level
, __u64
*ptrp
)
563 struct nilfs_btree
*btree
;
564 struct nilfs_btree_path
*path
;
568 btree
= (struct nilfs_btree
*)bmap
;
569 path
= nilfs_btree_alloc_path(btree
);
572 nilfs_btree_init_path(btree
, path
);
574 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
);
579 nilfs_btree_clear_path(btree
, path
);
580 nilfs_btree_free_path(btree
, path
);
585 static void nilfs_btree_promote_key(struct nilfs_btree
*btree
,
586 struct nilfs_btree_path
*path
,
587 int level
, __u64 key
)
589 if (level
< nilfs_btree_height(btree
) - 1) {
591 lock_buffer(path
[level
].bp_bh
);
592 nilfs_btree_node_set_key(
594 nilfs_btree_get_nonroot_node(
596 path
[level
].bp_index
, key
);
597 if (!buffer_dirty(path
[level
].bp_bh
))
598 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
599 unlock_buffer(path
[level
].bp_bh
);
600 } while ((path
[level
].bp_index
== 0) &&
601 (++level
< nilfs_btree_height(btree
) - 1));
605 if (level
== nilfs_btree_height(btree
) - 1) {
606 nilfs_btree_node_set_key(btree
,
607 nilfs_btree_get_root(btree
),
608 path
[level
].bp_index
, key
);
612 static void nilfs_btree_do_insert(struct nilfs_btree
*btree
,
613 struct nilfs_btree_path
*path
,
614 int level
, __u64
*keyp
, __u64
*ptrp
)
616 struct nilfs_btree_node
*node
;
618 if (level
< nilfs_btree_height(btree
) - 1) {
619 lock_buffer(path
[level
].bp_bh
);
620 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
621 nilfs_btree_node_insert(btree
, node
, *keyp
, *ptrp
,
622 path
[level
].bp_index
);
623 if (!buffer_dirty(path
[level
].bp_bh
))
624 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
625 unlock_buffer(path
[level
].bp_bh
);
627 if (path
[level
].bp_index
== 0)
628 nilfs_btree_promote_key(btree
, path
, level
+ 1,
629 nilfs_btree_node_get_key(
632 node
= nilfs_btree_get_root(btree
);
633 nilfs_btree_node_insert(btree
, node
, *keyp
, *ptrp
,
634 path
[level
].bp_index
);
638 static void nilfs_btree_carry_left(struct nilfs_btree
*btree
,
639 struct nilfs_btree_path
*path
,
640 int level
, __u64
*keyp
, __u64
*ptrp
)
642 struct nilfs_btree_node
*node
, *left
;
643 int nchildren
, lnchildren
, n
, move
;
645 lock_buffer(path
[level
].bp_bh
);
646 lock_buffer(path
[level
].bp_sib_bh
);
648 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
649 left
= nilfs_btree_get_sib_node(btree
, path
, level
);
650 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
651 lnchildren
= nilfs_btree_node_get_nchildren(btree
, left
);
654 n
= (nchildren
+ lnchildren
+ 1) / 2 - lnchildren
;
655 if (n
> path
[level
].bp_index
) {
656 /* move insert point */
661 nilfs_btree_node_move_left(btree
, left
, node
, n
);
663 if (!buffer_dirty(path
[level
].bp_bh
))
664 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
665 if (!buffer_dirty(path
[level
].bp_sib_bh
))
666 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
668 unlock_buffer(path
[level
].bp_bh
);
669 unlock_buffer(path
[level
].bp_sib_bh
);
671 nilfs_btree_promote_key(btree
, path
, level
+ 1,
672 nilfs_btree_node_get_key(btree
, node
, 0));
675 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_bh
);
676 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
677 path
[level
].bp_sib_bh
= NULL
;
678 path
[level
].bp_index
+= lnchildren
;
679 path
[level
+ 1].bp_index
--;
681 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
682 path
[level
].bp_sib_bh
= NULL
;
683 path
[level
].bp_index
-= n
;
686 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
689 static void nilfs_btree_carry_right(struct nilfs_btree
*btree
,
690 struct nilfs_btree_path
*path
,
691 int level
, __u64
*keyp
, __u64
*ptrp
)
693 struct nilfs_btree_node
*node
, *right
;
694 int nchildren
, rnchildren
, n
, move
;
696 lock_buffer(path
[level
].bp_bh
);
697 lock_buffer(path
[level
].bp_sib_bh
);
699 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
700 right
= nilfs_btree_get_sib_node(btree
, path
, level
);
701 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
702 rnchildren
= nilfs_btree_node_get_nchildren(btree
, right
);
705 n
= (nchildren
+ rnchildren
+ 1) / 2 - rnchildren
;
706 if (n
> nchildren
- path
[level
].bp_index
) {
707 /* move insert point */
712 nilfs_btree_node_move_right(btree
, node
, right
, n
);
714 if (!buffer_dirty(path
[level
].bp_bh
))
715 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
716 if (!buffer_dirty(path
[level
].bp_sib_bh
))
717 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
719 unlock_buffer(path
[level
].bp_bh
);
720 unlock_buffer(path
[level
].bp_sib_bh
);
722 path
[level
+ 1].bp_index
++;
723 nilfs_btree_promote_key(btree
, path
, level
+ 1,
724 nilfs_btree_node_get_key(btree
, right
, 0));
725 path
[level
+ 1].bp_index
--;
728 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_bh
);
729 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
730 path
[level
].bp_sib_bh
= NULL
;
731 path
[level
].bp_index
-=
732 nilfs_btree_node_get_nchildren(btree
, node
);
733 path
[level
+ 1].bp_index
++;
735 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
736 path
[level
].bp_sib_bh
= NULL
;
739 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
742 static void nilfs_btree_split(struct nilfs_btree
*btree
,
743 struct nilfs_btree_path
*path
,
744 int level
, __u64
*keyp
, __u64
*ptrp
)
746 struct nilfs_btree_node
*node
, *right
;
749 int nchildren
, n
, move
;
751 lock_buffer(path
[level
].bp_bh
);
752 lock_buffer(path
[level
].bp_sib_bh
);
754 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
755 right
= nilfs_btree_get_sib_node(btree
, path
, level
);
756 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
759 n
= (nchildren
+ 1) / 2;
760 if (n
> nchildren
- path
[level
].bp_index
) {
765 nilfs_btree_node_move_right(btree
, node
, right
, n
);
767 if (!buffer_dirty(path
[level
].bp_bh
))
768 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
769 if (!buffer_dirty(path
[level
].bp_sib_bh
))
770 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
772 unlock_buffer(path
[level
].bp_bh
);
773 unlock_buffer(path
[level
].bp_sib_bh
);
775 newkey
= nilfs_btree_node_get_key(btree
, right
, 0);
776 newptr
= path
[level
].bp_newreq
.bpr_ptr
;
779 path
[level
].bp_index
-=
780 nilfs_btree_node_get_nchildren(btree
, node
);
781 nilfs_btree_node_insert(btree
, right
, *keyp
, *ptrp
,
782 path
[level
].bp_index
);
784 *keyp
= nilfs_btree_node_get_key(btree
, right
, 0);
785 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
787 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_bh
);
788 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
789 path
[level
].bp_sib_bh
= NULL
;
791 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
793 *keyp
= nilfs_btree_node_get_key(btree
, right
, 0);
794 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
796 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
797 path
[level
].bp_sib_bh
= NULL
;
800 path
[level
+ 1].bp_index
++;
803 static void nilfs_btree_grow(struct nilfs_btree
*btree
,
804 struct nilfs_btree_path
*path
,
805 int level
, __u64
*keyp
, __u64
*ptrp
)
807 struct nilfs_btree_node
*root
, *child
;
810 lock_buffer(path
[level
].bp_sib_bh
);
812 root
= nilfs_btree_get_root(btree
);
813 child
= nilfs_btree_get_sib_node(btree
, path
, level
);
815 n
= nilfs_btree_node_get_nchildren(btree
, root
);
817 nilfs_btree_node_move_right(btree
, root
, child
, n
);
818 nilfs_btree_node_set_level(btree
, root
, level
+ 1);
820 if (!buffer_dirty(path
[level
].bp_sib_bh
))
821 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
823 unlock_buffer(path
[level
].bp_sib_bh
);
825 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
826 path
[level
].bp_sib_bh
= NULL
;
828 nilfs_btree_do_insert(btree
, path
, level
, keyp
, ptrp
);
830 *keyp
= nilfs_btree_node_get_key(btree
, child
, 0);
831 *ptrp
= path
[level
].bp_newreq
.bpr_ptr
;
834 static __u64
nilfs_btree_find_near(const struct nilfs_btree
*btree
,
835 const struct nilfs_btree_path
*path
)
837 struct nilfs_btree_node
*node
;
841 return NILFS_BMAP_INVALID_PTR
;
844 level
= NILFS_BTREE_LEVEL_NODE_MIN
;
845 if (path
[level
].bp_index
> 0) {
846 node
= nilfs_btree_get_node(btree
, path
, level
);
847 return nilfs_btree_node_get_ptr(btree
, node
,
848 path
[level
].bp_index
- 1);
852 level
= NILFS_BTREE_LEVEL_NODE_MIN
+ 1;
853 if (level
<= nilfs_btree_height(btree
) - 1) {
854 node
= nilfs_btree_get_node(btree
, path
, level
);
855 return nilfs_btree_node_get_ptr(btree
, node
,
856 path
[level
].bp_index
);
859 return NILFS_BMAP_INVALID_PTR
;
862 static __u64
nilfs_btree_find_target_v(const struct nilfs_btree
*btree
,
863 const struct nilfs_btree_path
*path
,
868 ptr
= nilfs_bmap_find_target_seq(&btree
->bt_bmap
, key
);
869 if (ptr
!= NILFS_BMAP_INVALID_PTR
)
870 /* sequential access */
873 ptr
= nilfs_btree_find_near(btree
, path
);
874 if (ptr
!= NILFS_BMAP_INVALID_PTR
)
879 return nilfs_bmap_find_target_in_group(&btree
->bt_bmap
);
882 static void nilfs_btree_set_target_v(struct nilfs_btree
*btree
, __u64 key
,
885 btree
->bt_bmap
.b_last_allocated_key
= key
;
886 btree
->bt_bmap
.b_last_allocated_ptr
= ptr
;
889 static int nilfs_btree_prepare_insert(struct nilfs_btree
*btree
,
890 struct nilfs_btree_path
*path
,
891 int *levelp
, __u64 key
, __u64 ptr
,
892 struct nilfs_bmap_stats
*stats
)
894 struct buffer_head
*bh
;
895 struct nilfs_btree_node
*node
, *parent
, *sib
;
897 int pindex
, level
, ret
;
899 stats
->bs_nblocks
= 0;
900 level
= NILFS_BTREE_LEVEL_DATA
;
902 /* allocate a new ptr for data block */
903 if (btree
->bt_ops
->btop_find_target
!= NULL
)
904 path
[level
].bp_newreq
.bpr_ptr
=
905 btree
->bt_ops
->btop_find_target(btree
, path
, key
);
907 ret
= btree
->bt_bmap
.b_pops
->bpop_prepare_alloc_ptr(
908 &btree
->bt_bmap
, &path
[level
].bp_newreq
);
912 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
913 level
< nilfs_btree_height(btree
) - 1;
915 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
916 if (nilfs_btree_node_get_nchildren(btree
, node
) <
917 nilfs_btree_node_nchildren_max(btree
, node
)) {
918 path
[level
].bp_op
= nilfs_btree_do_insert
;
923 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
924 pindex
= path
[level
+ 1].bp_index
;
928 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
930 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, sibptr
,
933 goto err_out_child_node
;
934 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
935 if (nilfs_btree_node_get_nchildren(btree
, sib
) <
936 nilfs_btree_node_nchildren_max(btree
, sib
)) {
937 path
[level
].bp_sib_bh
= bh
;
938 path
[level
].bp_op
= nilfs_btree_carry_left
;
942 nilfs_bmap_put_block(&btree
->bt_bmap
, bh
);
947 nilfs_btree_node_get_nchildren(btree
, parent
) - 1) {
948 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
950 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, sibptr
,
953 goto err_out_child_node
;
954 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
955 if (nilfs_btree_node_get_nchildren(btree
, sib
) <
956 nilfs_btree_node_nchildren_max(btree
, sib
)) {
957 path
[level
].bp_sib_bh
= bh
;
958 path
[level
].bp_op
= nilfs_btree_carry_right
;
962 nilfs_bmap_put_block(&btree
->bt_bmap
, bh
);
966 path
[level
].bp_newreq
.bpr_ptr
=
967 path
[level
- 1].bp_newreq
.bpr_ptr
+ 1;
968 ret
= btree
->bt_bmap
.b_pops
->bpop_prepare_alloc_ptr(
969 &btree
->bt_bmap
, &path
[level
].bp_newreq
);
971 goto err_out_child_node
;
972 ret
= nilfs_bmap_get_new_block(&btree
->bt_bmap
,
973 path
[level
].bp_newreq
.bpr_ptr
,
976 goto err_out_curr_node
;
981 nilfs_btree_node_init(btree
,
982 (struct nilfs_btree_node
*)bh
->b_data
,
983 0, level
, 0, NULL
, NULL
);
985 path
[level
].bp_sib_bh
= bh
;
986 path
[level
].bp_op
= nilfs_btree_split
;
990 node
= nilfs_btree_get_root(btree
);
991 if (nilfs_btree_node_get_nchildren(btree
, node
) <
992 nilfs_btree_node_nchildren_max(btree
, node
)) {
993 path
[level
].bp_op
= nilfs_btree_do_insert
;
999 path
[level
].bp_newreq
.bpr_ptr
= path
[level
- 1].bp_newreq
.bpr_ptr
+ 1;
1000 ret
= btree
->bt_bmap
.b_pops
->bpop_prepare_alloc_ptr(
1001 &btree
->bt_bmap
, &path
[level
].bp_newreq
);
1003 goto err_out_child_node
;
1004 ret
= nilfs_bmap_get_new_block(&btree
->bt_bmap
,
1005 path
[level
].bp_newreq
.bpr_ptr
, &bh
);
1007 goto err_out_curr_node
;
1010 nilfs_btree_node_init(btree
, (struct nilfs_btree_node
*)bh
->b_data
,
1011 0, level
, 0, NULL
, NULL
);
1013 path
[level
].bp_sib_bh
= bh
;
1014 path
[level
].bp_op
= nilfs_btree_grow
;
1017 path
[level
].bp_op
= nilfs_btree_do_insert
;
1019 /* a newly-created node block and a data block are added */
1020 stats
->bs_nblocks
+= 2;
1029 btree
->bt_bmap
.b_pops
->bpop_abort_alloc_ptr(&btree
->bt_bmap
,
1030 &path
[level
].bp_newreq
);
1032 for (level
--; level
> NILFS_BTREE_LEVEL_DATA
; level
--) {
1033 nilfs_bmap_delete_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
1034 btree
->bt_bmap
.b_pops
->bpop_abort_alloc_ptr(
1035 &btree
->bt_bmap
, &path
[level
].bp_newreq
);
1039 btree
->bt_bmap
.b_pops
->bpop_abort_alloc_ptr(&btree
->bt_bmap
,
1040 &path
[level
].bp_newreq
);
1043 stats
->bs_nblocks
= 0;
1047 static void nilfs_btree_commit_insert(struct nilfs_btree
*btree
,
1048 struct nilfs_btree_path
*path
,
1049 int maxlevel
, __u64 key
, __u64 ptr
)
1053 set_buffer_nilfs_volatile((struct buffer_head
*)((unsigned long)ptr
));
1054 ptr
= path
[NILFS_BTREE_LEVEL_DATA
].bp_newreq
.bpr_ptr
;
1055 if (btree
->bt_ops
->btop_set_target
!= NULL
)
1056 btree
->bt_ops
->btop_set_target(btree
, key
, ptr
);
1058 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
; level
<= maxlevel
; level
++) {
1059 if (btree
->bt_bmap
.b_pops
->bpop_commit_alloc_ptr
!= NULL
) {
1060 btree
->bt_bmap
.b_pops
->bpop_commit_alloc_ptr(
1061 &btree
->bt_bmap
, &path
[level
- 1].bp_newreq
);
1063 path
[level
].bp_op(btree
, path
, level
, &key
, &ptr
);
1066 if (!nilfs_bmap_dirty(&btree
->bt_bmap
))
1067 nilfs_bmap_set_dirty(&btree
->bt_bmap
);
1070 static int nilfs_btree_insert(struct nilfs_bmap
*bmap
, __u64 key
, __u64 ptr
)
1072 struct nilfs_btree
*btree
;
1073 struct nilfs_btree_path
*path
;
1074 struct nilfs_bmap_stats stats
;
1077 btree
= (struct nilfs_btree
*)bmap
;
1078 path
= nilfs_btree_alloc_path(btree
);
1081 nilfs_btree_init_path(btree
, path
);
1083 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
,
1084 NILFS_BTREE_LEVEL_NODE_MIN
);
1085 if (ret
!= -ENOENT
) {
1091 ret
= nilfs_btree_prepare_insert(btree
, path
, &level
, key
, ptr
, &stats
);
1094 nilfs_btree_commit_insert(btree
, path
, level
, key
, ptr
);
1095 nilfs_bmap_add_blocks(bmap
, stats
.bs_nblocks
);
1098 nilfs_btree_clear_path(btree
, path
);
1099 nilfs_btree_free_path(btree
, path
);
1103 static void nilfs_btree_do_delete(struct nilfs_btree
*btree
,
1104 struct nilfs_btree_path
*path
,
1105 int level
, __u64
*keyp
, __u64
*ptrp
)
1107 struct nilfs_btree_node
*node
;
1109 if (level
< nilfs_btree_height(btree
) - 1) {
1110 lock_buffer(path
[level
].bp_bh
);
1111 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
1112 nilfs_btree_node_delete(btree
, node
, keyp
, ptrp
,
1113 path
[level
].bp_index
);
1114 if (!buffer_dirty(path
[level
].bp_bh
))
1115 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1116 unlock_buffer(path
[level
].bp_bh
);
1117 if (path
[level
].bp_index
== 0)
1118 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1119 nilfs_btree_node_get_key(btree
, node
, 0));
1121 node
= nilfs_btree_get_root(btree
);
1122 nilfs_btree_node_delete(btree
, node
, keyp
, ptrp
,
1123 path
[level
].bp_index
);
1127 static void nilfs_btree_borrow_left(struct nilfs_btree
*btree
,
1128 struct nilfs_btree_path
*path
,
1129 int level
, __u64
*keyp
, __u64
*ptrp
)
1131 struct nilfs_btree_node
*node
, *left
;
1132 int nchildren
, lnchildren
, n
;
1134 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1136 lock_buffer(path
[level
].bp_bh
);
1137 lock_buffer(path
[level
].bp_sib_bh
);
1139 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
1140 left
= nilfs_btree_get_sib_node(btree
, path
, level
);
1141 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
1142 lnchildren
= nilfs_btree_node_get_nchildren(btree
, left
);
1144 n
= (nchildren
+ lnchildren
) / 2 - nchildren
;
1146 nilfs_btree_node_move_right(btree
, left
, node
, n
);
1148 if (!buffer_dirty(path
[level
].bp_bh
))
1149 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1150 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1151 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
1153 unlock_buffer(path
[level
].bp_bh
);
1154 unlock_buffer(path
[level
].bp_sib_bh
);
1156 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1157 nilfs_btree_node_get_key(btree
, node
, 0));
1159 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
1160 path
[level
].bp_sib_bh
= NULL
;
1161 path
[level
].bp_index
+= n
;
1164 static void nilfs_btree_borrow_right(struct nilfs_btree
*btree
,
1165 struct nilfs_btree_path
*path
,
1166 int level
, __u64
*keyp
, __u64
*ptrp
)
1168 struct nilfs_btree_node
*node
, *right
;
1169 int nchildren
, rnchildren
, n
;
1171 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1173 lock_buffer(path
[level
].bp_bh
);
1174 lock_buffer(path
[level
].bp_sib_bh
);
1176 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
1177 right
= nilfs_btree_get_sib_node(btree
, path
, level
);
1178 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
1179 rnchildren
= nilfs_btree_node_get_nchildren(btree
, right
);
1181 n
= (nchildren
+ rnchildren
) / 2 - nchildren
;
1183 nilfs_btree_node_move_left(btree
, node
, right
, n
);
1185 if (!buffer_dirty(path
[level
].bp_bh
))
1186 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1187 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1188 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
1190 unlock_buffer(path
[level
].bp_bh
);
1191 unlock_buffer(path
[level
].bp_sib_bh
);
1193 path
[level
+ 1].bp_index
++;
1194 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1195 nilfs_btree_node_get_key(btree
, right
, 0));
1196 path
[level
+ 1].bp_index
--;
1198 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
1199 path
[level
].bp_sib_bh
= NULL
;
1202 static void nilfs_btree_concat_left(struct nilfs_btree
*btree
,
1203 struct nilfs_btree_path
*path
,
1204 int level
, __u64
*keyp
, __u64
*ptrp
)
1206 struct nilfs_btree_node
*node
, *left
;
1209 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1211 lock_buffer(path
[level
].bp_bh
);
1212 lock_buffer(path
[level
].bp_sib_bh
);
1214 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
1215 left
= nilfs_btree_get_sib_node(btree
, path
, level
);
1217 n
= nilfs_btree_node_get_nchildren(btree
, node
);
1219 nilfs_btree_node_move_left(btree
, left
, node
, n
);
1221 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1222 nilfs_btnode_mark_dirty(path
[level
].bp_sib_bh
);
1224 unlock_buffer(path
[level
].bp_bh
);
1225 unlock_buffer(path
[level
].bp_sib_bh
);
1227 nilfs_bmap_delete_block(&btree
->bt_bmap
, path
[level
].bp_bh
);
1228 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
1229 path
[level
].bp_sib_bh
= NULL
;
1230 path
[level
].bp_index
+= nilfs_btree_node_get_nchildren(btree
, left
);
1233 static void nilfs_btree_concat_right(struct nilfs_btree
*btree
,
1234 struct nilfs_btree_path
*path
,
1235 int level
, __u64
*keyp
, __u64
*ptrp
)
1237 struct nilfs_btree_node
*node
, *right
;
1240 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1242 lock_buffer(path
[level
].bp_bh
);
1243 lock_buffer(path
[level
].bp_sib_bh
);
1245 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
1246 right
= nilfs_btree_get_sib_node(btree
, path
, level
);
1248 n
= nilfs_btree_node_get_nchildren(btree
, right
);
1250 nilfs_btree_node_move_left(btree
, node
, right
, n
);
1252 if (!buffer_dirty(path
[level
].bp_bh
))
1253 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1255 unlock_buffer(path
[level
].bp_bh
);
1256 unlock_buffer(path
[level
].bp_sib_bh
);
1258 nilfs_bmap_delete_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
1259 path
[level
].bp_sib_bh
= NULL
;
1260 path
[level
+ 1].bp_index
++;
1263 static void nilfs_btree_shrink(struct nilfs_btree
*btree
,
1264 struct nilfs_btree_path
*path
,
1265 int level
, __u64
*keyp
, __u64
*ptrp
)
1267 struct nilfs_btree_node
*root
, *child
;
1270 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1272 lock_buffer(path
[level
].bp_bh
);
1273 root
= nilfs_btree_get_root(btree
);
1274 child
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
1276 nilfs_btree_node_delete(btree
, root
, NULL
, NULL
, 0);
1277 nilfs_btree_node_set_level(btree
, root
, level
);
1278 n
= nilfs_btree_node_get_nchildren(btree
, child
);
1279 nilfs_btree_node_move_left(btree
, root
, child
, n
);
1280 unlock_buffer(path
[level
].bp_bh
);
1282 nilfs_bmap_delete_block(&btree
->bt_bmap
, path
[level
].bp_bh
);
1283 path
[level
].bp_bh
= NULL
;
1287 static int nilfs_btree_prepare_delete(struct nilfs_btree
*btree
,
1288 struct nilfs_btree_path
*path
,
1290 struct nilfs_bmap_stats
*stats
)
1292 struct buffer_head
*bh
;
1293 struct nilfs_btree_node
*node
, *parent
, *sib
;
1295 int pindex
, level
, ret
;
1298 stats
->bs_nblocks
= 0;
1299 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
1300 level
< nilfs_btree_height(btree
) - 1;
1302 node
= nilfs_btree_get_nonroot_node(btree
, path
, level
);
1303 path
[level
].bp_oldreq
.bpr_ptr
=
1304 nilfs_btree_node_get_ptr(btree
, node
,
1305 path
[level
].bp_index
);
1306 if (btree
->bt_bmap
.b_pops
->bpop_prepare_end_ptr
!= NULL
) {
1307 ret
= btree
->bt_bmap
.b_pops
->bpop_prepare_end_ptr(
1308 &btree
->bt_bmap
, &path
[level
].bp_oldreq
);
1310 goto err_out_child_node
;
1313 if (nilfs_btree_node_get_nchildren(btree
, node
) >
1314 nilfs_btree_node_nchildren_min(btree
, node
)) {
1315 path
[level
].bp_op
= nilfs_btree_do_delete
;
1316 stats
->bs_nblocks
++;
1320 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1321 pindex
= path
[level
+ 1].bp_index
;
1325 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
1327 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, sibptr
,
1330 goto err_out_curr_node
;
1331 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1332 if (nilfs_btree_node_get_nchildren(btree
, sib
) >
1333 nilfs_btree_node_nchildren_min(btree
, sib
)) {
1334 path
[level
].bp_sib_bh
= bh
;
1335 path
[level
].bp_op
= nilfs_btree_borrow_left
;
1336 stats
->bs_nblocks
++;
1339 path
[level
].bp_sib_bh
= bh
;
1340 path
[level
].bp_op
= nilfs_btree_concat_left
;
1341 stats
->bs_nblocks
++;
1345 nilfs_btree_node_get_nchildren(btree
, parent
) - 1) {
1347 sibptr
= nilfs_btree_node_get_ptr(btree
, parent
,
1349 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, sibptr
,
1352 goto err_out_curr_node
;
1353 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1354 if (nilfs_btree_node_get_nchildren(btree
, sib
) >
1355 nilfs_btree_node_nchildren_min(btree
, sib
)) {
1356 path
[level
].bp_sib_bh
= bh
;
1357 path
[level
].bp_op
= nilfs_btree_borrow_right
;
1358 stats
->bs_nblocks
++;
1361 path
[level
].bp_sib_bh
= bh
;
1362 path
[level
].bp_op
= nilfs_btree_concat_right
;
1363 stats
->bs_nblocks
++;
1368 /* the only child of the root node */
1369 BUG_ON(level
!= nilfs_btree_height(btree
) - 2);
1370 if (nilfs_btree_node_get_nchildren(btree
, node
) - 1 <=
1371 NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1372 path
[level
].bp_op
= nilfs_btree_shrink
;
1373 stats
->bs_nblocks
+= 2;
1375 path
[level
].bp_op
= nilfs_btree_do_delete
;
1376 stats
->bs_nblocks
++;
1384 node
= nilfs_btree_get_root(btree
);
1385 path
[level
].bp_oldreq
.bpr_ptr
=
1386 nilfs_btree_node_get_ptr(btree
, node
, path
[level
].bp_index
);
1387 if (btree
->bt_bmap
.b_pops
->bpop_prepare_end_ptr
!= NULL
) {
1388 ret
= btree
->bt_bmap
.b_pops
->bpop_prepare_end_ptr(
1389 &btree
->bt_bmap
, &path
[level
].bp_oldreq
);
1391 goto err_out_child_node
;
1393 /* child of the root node is deleted */
1394 path
[level
].bp_op
= nilfs_btree_do_delete
;
1395 stats
->bs_nblocks
++;
1404 if (btree
->bt_bmap
.b_pops
->bpop_abort_end_ptr
!= NULL
)
1405 btree
->bt_bmap
.b_pops
->bpop_abort_end_ptr(
1406 &btree
->bt_bmap
, &path
[level
].bp_oldreq
);
1408 for (level
--; level
>= NILFS_BTREE_LEVEL_NODE_MIN
; level
--) {
1409 nilfs_bmap_put_block(&btree
->bt_bmap
, path
[level
].bp_sib_bh
);
1410 if (btree
->bt_bmap
.b_pops
->bpop_abort_end_ptr
!= NULL
)
1411 btree
->bt_bmap
.b_pops
->bpop_abort_end_ptr(
1412 &btree
->bt_bmap
, &path
[level
].bp_oldreq
);
1415 stats
->bs_nblocks
= 0;
1419 static void nilfs_btree_commit_delete(struct nilfs_btree
*btree
,
1420 struct nilfs_btree_path
*path
,
1425 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
; level
<= maxlevel
; level
++) {
1426 if (btree
->bt_bmap
.b_pops
->bpop_commit_end_ptr
!= NULL
)
1427 btree
->bt_bmap
.b_pops
->bpop_commit_end_ptr(
1428 &btree
->bt_bmap
, &path
[level
].bp_oldreq
);
1429 path
[level
].bp_op(btree
, path
, level
, NULL
, NULL
);
1432 if (!nilfs_bmap_dirty(&btree
->bt_bmap
))
1433 nilfs_bmap_set_dirty(&btree
->bt_bmap
);
1436 static int nilfs_btree_delete(struct nilfs_bmap
*bmap
, __u64 key
)
1439 struct nilfs_btree
*btree
;
1440 struct nilfs_btree_path
*path
;
1441 struct nilfs_bmap_stats stats
;
1444 btree
= (struct nilfs_btree
*)bmap
;
1445 path
= nilfs_btree_alloc_path(btree
);
1448 nilfs_btree_init_path(btree
, path
);
1449 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
,
1450 NILFS_BTREE_LEVEL_NODE_MIN
);
1454 ret
= nilfs_btree_prepare_delete(btree
, path
, &level
, &stats
);
1457 nilfs_btree_commit_delete(btree
, path
, level
);
1458 nilfs_bmap_sub_blocks(bmap
, stats
.bs_nblocks
);
1461 nilfs_btree_clear_path(btree
, path
);
1462 nilfs_btree_free_path(btree
, path
);
1466 static int nilfs_btree_last_key(const struct nilfs_bmap
*bmap
, __u64
*keyp
)
1468 struct nilfs_btree
*btree
;
1469 struct nilfs_btree_path
*path
;
1472 btree
= (struct nilfs_btree
*)bmap
;
1473 path
= nilfs_btree_alloc_path(btree
);
1476 nilfs_btree_init_path(btree
, path
);
1478 ret
= nilfs_btree_do_lookup_last(btree
, path
, keyp
, NULL
);
1480 nilfs_btree_clear_path(btree
, path
);
1481 nilfs_btree_free_path(btree
, path
);
1486 static int nilfs_btree_check_delete(struct nilfs_bmap
*bmap
, __u64 key
)
1488 struct buffer_head
*bh
;
1489 struct nilfs_btree
*btree
;
1490 struct nilfs_btree_node
*root
, *node
;
1491 __u64 maxkey
, nextmaxkey
;
1495 btree
= (struct nilfs_btree
*)bmap
;
1496 root
= nilfs_btree_get_root(btree
);
1497 switch (nilfs_btree_height(btree
)) {
1503 nchildren
= nilfs_btree_node_get_nchildren(btree
, root
);
1506 ptr
= nilfs_btree_node_get_ptr(btree
, root
, nchildren
- 1);
1507 ret
= nilfs_bmap_get_block(bmap
, ptr
, &bh
);
1510 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1516 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
1517 maxkey
= nilfs_btree_node_get_key(btree
, node
, nchildren
- 1);
1518 nextmaxkey
= (nchildren
> 1) ?
1519 nilfs_btree_node_get_key(btree
, node
, nchildren
- 2) : 0;
1521 nilfs_bmap_put_block(bmap
, bh
);
1523 return (maxkey
== key
) && (nextmaxkey
< bmap
->b_low
);
1526 static int nilfs_btree_gather_data(struct nilfs_bmap
*bmap
,
1527 __u64
*keys
, __u64
*ptrs
, int nitems
)
1529 struct buffer_head
*bh
;
1530 struct nilfs_btree
*btree
;
1531 struct nilfs_btree_node
*node
, *root
;
1535 int nchildren
, i
, ret
;
1537 btree
= (struct nilfs_btree
*)bmap
;
1538 root
= nilfs_btree_get_root(btree
);
1539 switch (nilfs_btree_height(btree
)) {
1545 nchildren
= nilfs_btree_node_get_nchildren(btree
, root
);
1546 BUG_ON(nchildren
> 1);
1547 ptr
= nilfs_btree_node_get_ptr(btree
, root
, nchildren
- 1);
1548 ret
= nilfs_bmap_get_block(bmap
, ptr
, &bh
);
1551 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1558 nchildren
= nilfs_btree_node_get_nchildren(btree
, node
);
1559 if (nchildren
< nitems
)
1561 dkeys
= nilfs_btree_node_dkeys(btree
, node
);
1562 dptrs
= nilfs_btree_node_dptrs(btree
, node
);
1563 for (i
= 0; i
< nitems
; i
++) {
1564 keys
[i
] = nilfs_bmap_dkey_to_key(dkeys
[i
]);
1565 ptrs
[i
] = nilfs_bmap_dptr_to_ptr(dptrs
[i
]);
1569 nilfs_bmap_put_block(bmap
, bh
);
1575 nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap
*bmap
, __u64 key
,
1576 union nilfs_bmap_ptr_req
*dreq
,
1577 union nilfs_bmap_ptr_req
*nreq
,
1578 struct buffer_head
**bhp
,
1579 struct nilfs_bmap_stats
*stats
)
1581 struct buffer_head
*bh
;
1582 struct nilfs_btree
*btree
;
1585 btree
= (struct nilfs_btree
*)bmap
;
1586 stats
->bs_nblocks
= 0;
1589 /* cannot find near ptr */
1590 if (btree
->bt_ops
->btop_find_target
!= NULL
)
1592 = btree
->bt_ops
->btop_find_target(btree
, NULL
, key
);
1593 ret
= bmap
->b_pops
->bpop_prepare_alloc_ptr(bmap
, dreq
);
1598 stats
->bs_nblocks
++;
1600 nreq
->bpr_ptr
= dreq
->bpr_ptr
+ 1;
1601 ret
= bmap
->b_pops
->bpop_prepare_alloc_ptr(bmap
, nreq
);
1605 ret
= nilfs_bmap_get_new_block(bmap
, nreq
->bpr_ptr
, &bh
);
1610 stats
->bs_nblocks
++;
1618 bmap
->b_pops
->bpop_abort_alloc_ptr(bmap
, nreq
);
1620 bmap
->b_pops
->bpop_abort_alloc_ptr(bmap
, dreq
);
1621 stats
->bs_nblocks
= 0;
1627 nilfs_btree_commit_convert_and_insert(struct nilfs_bmap
*bmap
,
1628 __u64 key
, __u64 ptr
,
1629 const __u64
*keys
, const __u64
*ptrs
,
1630 int n
, __u64 low
, __u64 high
,
1631 union nilfs_bmap_ptr_req
*dreq
,
1632 union nilfs_bmap_ptr_req
*nreq
,
1633 struct buffer_head
*bh
)
1635 struct nilfs_btree
*btree
;
1636 struct nilfs_btree_node
*node
;
1639 /* free resources */
1640 if (bmap
->b_ops
->bop_clear
!= NULL
)
1641 bmap
->b_ops
->bop_clear(bmap
);
1643 /* ptr must be a pointer to a buffer head. */
1644 set_buffer_nilfs_volatile((struct buffer_head
*)((unsigned long)ptr
));
1646 /* convert and insert */
1647 btree
= (struct nilfs_btree
*)bmap
;
1648 nilfs_btree_init(bmap
, low
, high
);
1650 if (bmap
->b_pops
->bpop_commit_alloc_ptr
!= NULL
) {
1651 bmap
->b_pops
->bpop_commit_alloc_ptr(bmap
, dreq
);
1652 bmap
->b_pops
->bpop_commit_alloc_ptr(bmap
, nreq
);
1655 /* create child node at level 1 */
1657 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1658 nilfs_btree_node_init(btree
, node
, 0, 1, n
, keys
, ptrs
);
1659 nilfs_btree_node_insert(btree
, node
,
1660 key
, dreq
->bpr_ptr
, n
);
1661 if (!buffer_dirty(bh
))
1662 nilfs_btnode_mark_dirty(bh
);
1663 if (!nilfs_bmap_dirty(bmap
))
1664 nilfs_bmap_set_dirty(bmap
);
1667 nilfs_bmap_put_block(bmap
, bh
);
1669 /* create root node at level 2 */
1670 node
= nilfs_btree_get_root(btree
);
1671 tmpptr
= nreq
->bpr_ptr
;
1672 nilfs_btree_node_init(btree
, node
, NILFS_BTREE_NODE_ROOT
,
1673 2, 1, &keys
[0], &tmpptr
);
1675 if (bmap
->b_pops
->bpop_commit_alloc_ptr
!= NULL
)
1676 bmap
->b_pops
->bpop_commit_alloc_ptr(bmap
, dreq
);
1678 /* create root node at level 1 */
1679 node
= nilfs_btree_get_root(btree
);
1680 nilfs_btree_node_init(btree
, node
, NILFS_BTREE_NODE_ROOT
,
1682 nilfs_btree_node_insert(btree
, node
,
1683 key
, dreq
->bpr_ptr
, n
);
1684 if (!nilfs_bmap_dirty(bmap
))
1685 nilfs_bmap_set_dirty(bmap
);
1688 if (btree
->bt_ops
->btop_set_target
!= NULL
)
1689 btree
->bt_ops
->btop_set_target(btree
, key
, dreq
->bpr_ptr
);
1693 * nilfs_btree_convert_and_insert -
1703 int nilfs_btree_convert_and_insert(struct nilfs_bmap
*bmap
,
1704 __u64 key
, __u64 ptr
,
1705 const __u64
*keys
, const __u64
*ptrs
,
1706 int n
, __u64 low
, __u64 high
)
1708 struct buffer_head
*bh
;
1709 union nilfs_bmap_ptr_req dreq
, nreq
, *di
, *ni
;
1710 struct nilfs_bmap_stats stats
;
1713 if (n
+ 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1716 } else if ((n
+ 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
1717 1 << bmap
->b_inode
->i_blkbits
)) {
1726 ret
= nilfs_btree_prepare_convert_and_insert(bmap
, key
, di
, ni
, &bh
,
1730 nilfs_btree_commit_convert_and_insert(bmap
, key
, ptr
, keys
, ptrs
, n
,
1731 low
, high
, di
, ni
, bh
);
1732 nilfs_bmap_add_blocks(bmap
, stats
.bs_nblocks
);
1736 static int nilfs_btree_propagate_p(struct nilfs_btree
*btree
,
1737 struct nilfs_btree_path
*path
,
1739 struct buffer_head
*bh
)
1741 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1742 !buffer_dirty(path
[level
].bp_bh
))
1743 nilfs_btnode_mark_dirty(path
[level
].bp_bh
);
1748 static int nilfs_btree_prepare_update_v(struct nilfs_btree
*btree
,
1749 struct nilfs_btree_path
*path
,
1752 struct nilfs_btree_node
*parent
;
1755 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1756 path
[level
].bp_oldreq
.bpr_ptr
=
1757 nilfs_btree_node_get_ptr(btree
, parent
,
1758 path
[level
+ 1].bp_index
);
1759 path
[level
].bp_newreq
.bpr_ptr
= path
[level
].bp_oldreq
.bpr_ptr
+ 1;
1760 ret
= nilfs_bmap_prepare_update(&btree
->bt_bmap
,
1761 &path
[level
].bp_oldreq
,
1762 &path
[level
].bp_newreq
);
1766 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1767 path
[level
].bp_ctxt
.oldkey
= path
[level
].bp_oldreq
.bpr_ptr
;
1768 path
[level
].bp_ctxt
.newkey
= path
[level
].bp_newreq
.bpr_ptr
;
1769 path
[level
].bp_ctxt
.bh
= path
[level
].bp_bh
;
1770 ret
= nilfs_btnode_prepare_change_key(
1771 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
1772 &path
[level
].bp_ctxt
);
1774 nilfs_bmap_abort_update(&btree
->bt_bmap
,
1775 &path
[level
].bp_oldreq
,
1776 &path
[level
].bp_newreq
);
1784 static void nilfs_btree_commit_update_v(struct nilfs_btree
*btree
,
1785 struct nilfs_btree_path
*path
,
1788 struct nilfs_btree_node
*parent
;
1790 nilfs_bmap_commit_update(&btree
->bt_bmap
,
1791 &path
[level
].bp_oldreq
,
1792 &path
[level
].bp_newreq
);
1794 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1795 nilfs_btnode_commit_change_key(
1796 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
1797 &path
[level
].bp_ctxt
);
1798 path
[level
].bp_bh
= path
[level
].bp_ctxt
.bh
;
1800 set_buffer_nilfs_volatile(path
[level
].bp_bh
);
1802 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1803 nilfs_btree_node_set_ptr(btree
, parent
, path
[level
+ 1].bp_index
,
1804 path
[level
].bp_newreq
.bpr_ptr
);
1807 static void nilfs_btree_abort_update_v(struct nilfs_btree
*btree
,
1808 struct nilfs_btree_path
*path
,
1811 nilfs_bmap_abort_update(&btree
->bt_bmap
,
1812 &path
[level
].bp_oldreq
,
1813 &path
[level
].bp_newreq
);
1814 if (buffer_nilfs_node(path
[level
].bp_bh
))
1815 nilfs_btnode_abort_change_key(
1816 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
1817 &path
[level
].bp_ctxt
);
1820 static int nilfs_btree_prepare_propagate_v(struct nilfs_btree
*btree
,
1821 struct nilfs_btree_path
*path
,
1828 if (!buffer_nilfs_volatile(path
[level
].bp_bh
)) {
1829 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
);
1833 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1834 !buffer_dirty(path
[level
].bp_bh
)) {
1836 BUG_ON(buffer_nilfs_volatile(path
[level
].bp_bh
));
1837 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
);
1843 BUG_ON(maxlevelp
== NULL
);
1844 *maxlevelp
= level
- 1;
1849 while (--level
> minlevel
)
1850 nilfs_btree_abort_update_v(btree
, path
, level
);
1851 if (!buffer_nilfs_volatile(path
[level
].bp_bh
))
1852 nilfs_btree_abort_update_v(btree
, path
, level
);
1856 static void nilfs_btree_commit_propagate_v(struct nilfs_btree
*btree
,
1857 struct nilfs_btree_path
*path
,
1860 struct buffer_head
*bh
)
1864 if (!buffer_nilfs_volatile(path
[minlevel
].bp_bh
))
1865 nilfs_btree_commit_update_v(btree
, path
, minlevel
);
1867 for (level
= minlevel
+ 1; level
<= maxlevel
; level
++)
1868 nilfs_btree_commit_update_v(btree
, path
, level
);
1871 static int nilfs_btree_propagate_v(struct nilfs_btree
*btree
,
1872 struct nilfs_btree_path
*path
,
1874 struct buffer_head
*bh
)
1877 struct nilfs_btree_node
*parent
;
1881 path
[level
].bp_bh
= bh
;
1882 ret
= nilfs_btree_prepare_propagate_v(btree
, path
, level
, &maxlevel
);
1886 if (buffer_nilfs_volatile(path
[level
].bp_bh
)) {
1887 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
1888 ptr
= nilfs_btree_node_get_ptr(btree
, parent
,
1889 path
[level
+ 1].bp_index
);
1890 ret
= nilfs_bmap_mark_dirty(&btree
->bt_bmap
, ptr
);
1895 nilfs_btree_commit_propagate_v(btree
, path
, level
, maxlevel
, bh
);
1898 brelse(path
[level
].bp_bh
);
1899 path
[level
].bp_bh
= NULL
;
1903 static int nilfs_btree_propagate(const struct nilfs_bmap
*bmap
,
1904 struct buffer_head
*bh
)
1906 struct nilfs_btree
*btree
;
1907 struct nilfs_btree_path
*path
;
1908 struct nilfs_btree_node
*node
;
1912 BUG_ON(!buffer_dirty(bh
));
1914 btree
= (struct nilfs_btree
*)bmap
;
1915 path
= nilfs_btree_alloc_path(btree
);
1918 nilfs_btree_init_path(btree
, path
);
1920 if (buffer_nilfs_node(bh
)) {
1921 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1922 key
= nilfs_btree_node_get_key(btree
, node
, 0);
1923 level
= nilfs_btree_node_get_level(btree
, node
);
1925 key
= nilfs_bmap_data_get_key(bmap
, bh
);
1926 level
= NILFS_BTREE_LEVEL_DATA
;
1929 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1);
1931 /* BUG_ON(ret == -ENOENT); */
1932 if (ret
== -ENOENT
) {
1933 printk(KERN_CRIT
"%s: key = %llu, level == %d\n",
1934 __func__
, (unsigned long long)key
, level
);
1940 ret
= btree
->bt_ops
->btop_propagate(btree
, path
, level
, bh
);
1943 nilfs_btree_clear_path(btree
, path
);
1944 nilfs_btree_free_path(btree
, path
);
1949 static int nilfs_btree_propagate_gc(const struct nilfs_bmap
*bmap
,
1950 struct buffer_head
*bh
)
1952 return nilfs_bmap_mark_dirty(bmap
, bh
->b_blocknr
);
1955 static void nilfs_btree_add_dirty_buffer(struct nilfs_btree
*btree
,
1956 struct list_head
*lists
,
1957 struct buffer_head
*bh
)
1959 struct list_head
*head
;
1960 struct buffer_head
*cbh
;
1961 struct nilfs_btree_node
*node
, *cnode
;
1966 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1967 key
= nilfs_btree_node_get_key(btree
, node
, 0);
1968 level
= nilfs_btree_node_get_level(btree
, node
);
1969 list_for_each(head
, &lists
[level
]) {
1970 cbh
= list_entry(head
, struct buffer_head
, b_assoc_buffers
);
1971 cnode
= (struct nilfs_btree_node
*)cbh
->b_data
;
1972 ckey
= nilfs_btree_node_get_key(btree
, cnode
, 0);
1976 list_add_tail(&bh
->b_assoc_buffers
, head
);
1979 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap
*bmap
,
1980 struct list_head
*listp
)
1982 struct nilfs_btree
*btree
= (struct nilfs_btree
*)bmap
;
1983 struct address_space
*btcache
= &NILFS_BMAP_I(bmap
)->i_btnode_cache
;
1984 struct list_head lists
[NILFS_BTREE_LEVEL_MAX
];
1985 struct pagevec pvec
;
1986 struct buffer_head
*bh
, *head
;
1990 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
1991 level
< NILFS_BTREE_LEVEL_MAX
;
1993 INIT_LIST_HEAD(&lists
[level
]);
1995 pagevec_init(&pvec
, 0);
1997 while (pagevec_lookup_tag(&pvec
, btcache
, &index
, PAGECACHE_TAG_DIRTY
,
1999 for (i
= 0; i
< pagevec_count(&pvec
); i
++) {
2000 bh
= head
= page_buffers(pvec
.pages
[i
]);
2002 if (buffer_dirty(bh
))
2003 nilfs_btree_add_dirty_buffer(btree
,
2005 } while ((bh
= bh
->b_this_page
) != head
);
2007 pagevec_release(&pvec
);
2011 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
2012 level
< NILFS_BTREE_LEVEL_MAX
;
2014 list_splice(&lists
[level
], listp
->prev
);
2017 static int nilfs_btree_assign_p(struct nilfs_btree
*btree
,
2018 struct nilfs_btree_path
*path
,
2020 struct buffer_head
**bh
,
2022 union nilfs_binfo
*binfo
)
2024 struct nilfs_btree_node
*parent
;
2029 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
2030 ptr
= nilfs_btree_node_get_ptr(btree
, parent
,
2031 path
[level
+ 1].bp_index
);
2032 if (buffer_nilfs_node(*bh
)) {
2033 path
[level
].bp_ctxt
.oldkey
= ptr
;
2034 path
[level
].bp_ctxt
.newkey
= blocknr
;
2035 path
[level
].bp_ctxt
.bh
= *bh
;
2036 ret
= nilfs_btnode_prepare_change_key(
2037 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
2038 &path
[level
].bp_ctxt
);
2041 nilfs_btnode_commit_change_key(
2042 &NILFS_BMAP_I(&btree
->bt_bmap
)->i_btnode_cache
,
2043 &path
[level
].bp_ctxt
);
2044 *bh
= path
[level
].bp_ctxt
.bh
;
2047 nilfs_btree_node_set_ptr(btree
, parent
,
2048 path
[level
+ 1].bp_index
, blocknr
);
2050 key
= nilfs_btree_node_get_key(btree
, parent
,
2051 path
[level
+ 1].bp_index
);
2052 /* on-disk format */
2053 binfo
->bi_dat
.bi_blkoff
= nilfs_bmap_key_to_dkey(key
);
2054 binfo
->bi_dat
.bi_level
= level
;
2059 static int nilfs_btree_assign_v(struct nilfs_btree
*btree
,
2060 struct nilfs_btree_path
*path
,
2062 struct buffer_head
**bh
,
2064 union nilfs_binfo
*binfo
)
2066 struct nilfs_btree_node
*parent
;
2069 union nilfs_bmap_ptr_req req
;
2072 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1);
2073 ptr
= nilfs_btree_node_get_ptr(btree
, parent
,
2074 path
[level
+ 1].bp_index
);
2076 ret
= btree
->bt_bmap
.b_pops
->bpop_prepare_start_ptr(&btree
->bt_bmap
,
2080 btree
->bt_bmap
.b_pops
->bpop_commit_start_ptr(&btree
->bt_bmap
,
2083 key
= nilfs_btree_node_get_key(btree
, parent
,
2084 path
[level
+ 1].bp_index
);
2085 /* on-disk format */
2086 binfo
->bi_v
.bi_vblocknr
= nilfs_bmap_ptr_to_dptr(ptr
);
2087 binfo
->bi_v
.bi_blkoff
= nilfs_bmap_key_to_dkey(key
);
2092 static int nilfs_btree_assign(struct nilfs_bmap
*bmap
,
2093 struct buffer_head
**bh
,
2095 union nilfs_binfo
*binfo
)
2097 struct nilfs_btree
*btree
;
2098 struct nilfs_btree_path
*path
;
2099 struct nilfs_btree_node
*node
;
2103 btree
= (struct nilfs_btree
*)bmap
;
2104 path
= nilfs_btree_alloc_path(btree
);
2107 nilfs_btree_init_path(btree
, path
);
2109 if (buffer_nilfs_node(*bh
)) {
2110 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2111 key
= nilfs_btree_node_get_key(btree
, node
, 0);
2112 level
= nilfs_btree_node_get_level(btree
, node
);
2114 key
= nilfs_bmap_data_get_key(bmap
, *bh
);
2115 level
= NILFS_BTREE_LEVEL_DATA
;
2118 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1);
2120 BUG_ON(ret
== -ENOENT
);
2124 ret
= btree
->bt_ops
->btop_assign(btree
, path
, level
, bh
,
2128 nilfs_btree_clear_path(btree
, path
);
2129 nilfs_btree_free_path(btree
, path
);
2134 static int nilfs_btree_assign_gc(struct nilfs_bmap
*bmap
,
2135 struct buffer_head
**bh
,
2137 union nilfs_binfo
*binfo
)
2139 struct nilfs_btree
*btree
;
2140 struct nilfs_btree_node
*node
;
2144 btree
= (struct nilfs_btree
*)bmap
;
2145 ret
= nilfs_bmap_move_v(bmap
, (*bh
)->b_blocknr
, blocknr
);
2149 if (buffer_nilfs_node(*bh
)) {
2150 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2151 key
= nilfs_btree_node_get_key(btree
, node
, 0);
2153 key
= nilfs_bmap_data_get_key(bmap
, *bh
);
2155 /* on-disk format */
2156 binfo
->bi_v
.bi_vblocknr
= cpu_to_le64((*bh
)->b_blocknr
);
2157 binfo
->bi_v
.bi_blkoff
= nilfs_bmap_key_to_dkey(key
);
2162 static int nilfs_btree_mark(struct nilfs_bmap
*bmap
, __u64 key
, int level
)
2164 struct buffer_head
*bh
;
2165 struct nilfs_btree
*btree
;
2166 struct nilfs_btree_path
*path
;
2170 btree
= (struct nilfs_btree
*)bmap
;
2171 path
= nilfs_btree_alloc_path(btree
);
2174 nilfs_btree_init_path(btree
, path
);
2176 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
+ 1);
2178 BUG_ON(ret
== -ENOENT
);
2181 ret
= nilfs_bmap_get_block(&btree
->bt_bmap
, ptr
, &bh
);
2183 BUG_ON(ret
== -ENOENT
);
2187 if (!buffer_dirty(bh
))
2188 nilfs_btnode_mark_dirty(bh
);
2189 nilfs_bmap_put_block(&btree
->bt_bmap
, bh
);
2190 if (!nilfs_bmap_dirty(&btree
->bt_bmap
))
2191 nilfs_bmap_set_dirty(&btree
->bt_bmap
);
2194 nilfs_btree_clear_path(btree
, path
);
2195 nilfs_btree_free_path(btree
, path
);
2199 static const struct nilfs_bmap_operations nilfs_btree_ops
= {
2200 .bop_lookup
= nilfs_btree_lookup
,
2201 .bop_insert
= nilfs_btree_insert
,
2202 .bop_delete
= nilfs_btree_delete
,
2205 .bop_propagate
= nilfs_btree_propagate
,
2207 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2209 .bop_assign
= nilfs_btree_assign
,
2210 .bop_mark
= nilfs_btree_mark
,
2212 .bop_last_key
= nilfs_btree_last_key
,
2213 .bop_check_insert
= NULL
,
2214 .bop_check_delete
= nilfs_btree_check_delete
,
2215 .bop_gather_data
= nilfs_btree_gather_data
,
2218 static const struct nilfs_bmap_operations nilfs_btree_ops_gc
= {
2224 .bop_propagate
= nilfs_btree_propagate_gc
,
2226 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2228 .bop_assign
= nilfs_btree_assign_gc
,
2231 .bop_last_key
= NULL
,
2232 .bop_check_insert
= NULL
,
2233 .bop_check_delete
= NULL
,
2234 .bop_gather_data
= NULL
,
2237 static const struct nilfs_btree_operations nilfs_btree_ops_v
= {
2238 .btop_find_target
= nilfs_btree_find_target_v
,
2239 .btop_set_target
= nilfs_btree_set_target_v
,
2240 .btop_propagate
= nilfs_btree_propagate_v
,
2241 .btop_assign
= nilfs_btree_assign_v
,
2244 static const struct nilfs_btree_operations nilfs_btree_ops_p
= {
2245 .btop_find_target
= NULL
,
2246 .btop_set_target
= NULL
,
2247 .btop_propagate
= nilfs_btree_propagate_p
,
2248 .btop_assign
= nilfs_btree_assign_p
,
2251 int nilfs_btree_init(struct nilfs_bmap
*bmap
, __u64 low
, __u64 high
)
2253 struct nilfs_btree
*btree
;
2255 btree
= (struct nilfs_btree
*)bmap
;
2256 bmap
->b_ops
= &nilfs_btree_ops
;
2258 bmap
->b_high
= high
;
2259 switch (bmap
->b_inode
->i_ino
) {
2261 btree
->bt_ops
= &nilfs_btree_ops_p
;
2264 btree
->bt_ops
= &nilfs_btree_ops_v
;
2271 void nilfs_btree_init_gc(struct nilfs_bmap
*bmap
)
2273 bmap
->b_low
= NILFS_BMAP_LARGE_LOW
;
2274 bmap
->b_high
= NILFS_BMAP_LARGE_HIGH
;
2275 bmap
->b_ops
= &nilfs_btree_ops_gc
;